Merge topic 'ComputeBranchDecompositionInfo'

d3db3d1dd Implement computing branch decomposition info and added unit test

Acked-by: Kitware Robot <kwrobot@kitware.com>
Merge-request: !3112
Abdelilah Essiari 2023-08-29 15:23:18 +00:00 committed by Kitware Robot
commit e0830a7af8
17 changed files with 1599 additions and 20 deletions

@ -242,7 +242,9 @@ cmake_dependent_option(VTKm_ENABLE_KOKKOS_THRUST "Enable Kokkos thrust support (
# when building VTK-m.
include(VTKmCompilerFlags)
if (VTKM_EXAMPLE_CONTOURTREE_ENABLE_DEBUG_PRINT)
add_compile_definitions(DEBUG_PRINT)
endif()
#-----------------------------------------------------------------------------
# We need to check and see if git lfs is installed so that test data will
# be available for use

@ -440,6 +440,7 @@ int main(int argc, char* argv[])
return 255;
}
VTKM_LOG_S(exampleLogLevel,
std::endl
<< " ------------ Settings -----------" << std::endl
@ -659,14 +660,37 @@ int main(int argc, char* argv[])
for (vtkm::Id ds_no = 0; ds_no < result.GetNumberOfPartitions(); ++ds_no)
{
auto ds = bd_result.GetPartition(ds_no);
std::string branchDecompositionFileName = std::string("BranchDecomposition_Rank_") +
std::string branchDecompositionIntermediateFileName =
std::string("BranchDecompositionIntermediate_Rank_") +
std::to_string(static_cast<int>(rank)) + std::string("_Block_") +
std::to_string(static_cast<int>(ds_no)) + std::string(".txt");
std::ofstream treeStream(branchDecompositionFileName.c_str());
treeStream
std::ofstream treeStreamIntermediate(branchDecompositionIntermediateFileName.c_str());
treeStreamIntermediate
<< vtkm::filter::scalar_topology::HierarchicalVolumetricBranchDecomposer::PrintBranches(
ds);
std::string branchDecompositionFileName = std::string("BranchDecomposition_Rank_") +
std::to_string(static_cast<int>(rank)) + std::string("_Block_") +
std::to_string(static_cast<int>(ds_no)) + std::string(".txt");
std::ofstream treeStream(branchDecompositionFileName.c_str());
auto upperEndGRId = ds.GetField("UpperEndGlobalRegularIds")
.GetData()
.AsArrayHandle<vtkm::cont::ArrayHandle<vtkm::Id>>()
.ReadPortal();
auto lowerEndGRId = ds.GetField("LowerEndGlobalRegularIds")
.GetData()
.AsArrayHandle<vtkm::cont::ArrayHandle<vtkm::Id>>()
.ReadPortal();
vtkm::Id nBranches = upperEndGRId.GetNumberOfValues();
for (vtkm::Id branch = 0; branch < nBranches; ++branch)
{
treeStream << std::setw(12) << upperEndGRId.Get(branch) << std::setw(14)
<< lowerEndGRId.Get(branch) << std::endl;
}
}
}
else

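For context, the block above reads the two per-branch fields that this merge request adds to each output partition. A minimal, self-contained usage sketch of the same pattern follows; the helper name and the assumption that the input is the augmented hierarchical tree produced upstream (and that the filter needs no configuration beyond its defaults) are mine, not part of the MR:

#include <iostream>
#include <vtkm/cont/ArrayHandle.h>
#include <vtkm/cont/PartitionedDataSet.h>
#include <vtkm/filter/scalar_topology/DistributedBranchDecompositionFilter.h>

// Hypothetical helper: run the branch decomposition filter on an already
// augmented hierarchical contour tree and print one (upper, lower) global
// regular ID pair per branch, mirroring the example code above.
inline void PrintBranchEnds(const vtkm::cont::PartitionedDataSet& augmentedHierarchicalTree)
{
  vtkm::filter::scalar_topology::DistributedBranchDecompositionFilter bdFilter;
  vtkm::cont::PartitionedDataSet bdResult = bdFilter.Execute(augmentedHierarchicalTree);
  for (vtkm::Id dsNo = 0; dsNo < bdResult.GetNumberOfPartitions(); ++dsNo)
  {
    auto ds = bdResult.GetPartition(dsNo);
    // Field names as added by this merge request
    auto upperEndGRId = ds.GetField("UpperEndGlobalRegularIds")
                          .GetData()
                          .AsArrayHandle<vtkm::cont::ArrayHandle<vtkm::Id>>()
                          .ReadPortal();
    auto lowerEndGRId = ds.GetField("LowerEndGlobalRegularIds")
                          .GetData()
                          .AsArrayHandle<vtkm::cont::ArrayHandle<vtkm::Id>>()
                          .ReadPortal();
    for (vtkm::Id branch = 0; branch < upperEndGRId.GetNumberOfValues(); ++branch)
    {
      std::cout << upperEndGRId.Get(branch) << " " << lowerEndGRId.Get(branch) << std::endl;
    }
  }
}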
@ -24,7 +24,7 @@ if [ -t 1 ]; then
fi
echo "Removing previously generated files"
rm *.log *.dat
rm -f *.log *.dat
echo "Copying target file "$1 "into current directory"
filename=${1##*/}
@ -42,16 +42,32 @@ n_parts=$(($2*$2))
mpirun -np 2 --oversubscribe ./ContourTree_Distributed --vtkm-device Any --preSplitFiles --saveOutputData --augmentHierarchicalTree --computeVolumeBranchDecomposition --numBlocks=${n_parts} ${fileroot}_part_%d_of_${n_parts}.txt
rm ${fileroot}_part_*_of_${n_parts}.txt
echo "Compiling Outputs"
sort -u BranchDecomposition_Rank_*.txt > outsort${fileroot}_$2x$2.txt
cat outsort${fileroot}_$2x$2.txt | ./BranchCompiler | sort > bcompile${fileroot}_$2x$2.txt
rm BranchDecomposition_Rank_*.txt outsort${fileroot}_$2x$2.txt
# ground truth result
sort -u ${GTCT_DIR}/branch_decomposition_volume_hybrid_${fileroot}.txt > sorted_ground${fileroot}_$2x$2.txt
echo "Diffing"
echo diff bcompile${fileroot}_$2x$2.txt ${GTCT_DIR}/branch_decomposition_volume_hybrid_${fileroot}.txt
diff bcompile${fileroot}_$2x$2.txt ${GTCT_DIR}/branch_decomposition_volume_hybrid_${fileroot}.txt
echo "Handling BranchDecomposition Outputs"
sort -u BranchDecomposition_Rank_*.txt > sorted_branch_decomposition${fileroot}_$2x$2.txt
rm BranchDecomposition_Rank_*.txt
if test $? -eq 0; then echo "${GREEN}Pass${NC}"; rm bcompile${fileroot}_$2x$2.txt; else echo "${RED}FAIL${NC}"; fi;
diff sorted_branch_decomposition${fileroot}_$2x$2.txt sorted_ground${fileroot}_$2x$2.txt
diff1=$?
if test $diff1 -eq 0; then echo "${GREEN}Pass${NC}"; rm sorted_branch_decomposition${fileroot}_$2x$2.txt; else echo "${RED}FAIL${NC}"; fi;
echo "Handling Intermediate BranchDecomposition Outputs"
sort -u BranchDecompositionIntermediate_Rank_*.txt | ./BranchCompiler | sort -u > sorted_branch_decomposition_intermediate${fileroot}_$2x$2.txt
rm BranchDecompositionIntermediate_Rank_*.txt
echo diff sorted_branch_decomposition_intermediate${fileroot}_$2x$2.txt sorted_ground${fileroot}_$2x$2.txt
diff sorted_branch_decomposition_intermediate${fileroot}_$2x$2.txt sorted_ground${fileroot}_$2x$2.txt
diff2=$?
if test $diff2 -eq 0; then echo "Intermediate:${GREEN}Pass${NC}"; rm sorted_branch_decomposition_intermediate${fileroot}_$2x$2.txt; else echo "Intermediate:${RED}FAIL${NC}"; fi;
if [ $diff1 -eq 0 ] && [ $diff2 -eq 0 ]
then
rm sorted_ground${fileroot}_$2x$2.txt
rm *.log
fi
# echo "Generating Dot files"
# ./makedot.sh

@ -18,6 +18,7 @@ set(scalar_topology_sources
internal/BranchDecompositionBlock.cxx
internal/ComputeBlockIndices.cxx
internal/ComputeDistributedBranchDecompositionFunctor.cxx
internal/ExchangeBranchEndsFunctor.cxx
ContourTreeUniform.cxx
ContourTreeUniformAugmented.cxx
ContourTreeUniformDistributed.cxx

@ -12,6 +12,9 @@
#include <vtkm/filter/scalar_topology/DistributedBranchDecompositionFilter.h>
#include <vtkm/filter/scalar_topology/internal/BranchDecompositionBlock.h>
#include <vtkm/filter/scalar_topology/internal/ComputeDistributedBranchDecompositionFunctor.h>
#include <vtkm/filter/scalar_topology/internal/ExchangeBranchEndsFunctor.h>
#include <vtkm/filter/scalar_topology/worklet/contourtree_augmented/ArrayTransforms.h>
// vtkm includes
#include <vtkm/cont/Timer.h>
@ -158,6 +161,7 @@ VTKM_CONT vtkm::cont::PartitionedDataSet DistributedBranchDecompositionFilter::D
ghosts,
diyDivisions);
// TODO/FIXME: Check what happened here and possibly eliminate!
for (vtkm::Id bi = 0; bi < input.GetNumberOfPartitions(); bi++)
{
}
@ -209,16 +213,16 @@ VTKM_CONT vtkm::cont::PartitionedDataSet DistributedBranchDecompositionFilter::D
{
std::stringstream rs;
vtkm::worklet::contourtree_augmented::PrintHeader(
b->HierarchicalVolumetricBranchDecomposer.BestUpSupernode.GetNumberOfValues(), rs);
b->VolumetricBranchDecomposer.BestUpSupernode.GetNumberOfValues(), rs);
vtkm::worklet::contourtree_augmented::PrintIndices(
"BestUpSupernode", b->HierarchicalVolumetricBranchDecomposer.BestUpSupernode, -1, rs);
"BestUpSupernode", b->VolumetricBranchDecomposer.BestUpSupernode, -1, rs);
vtkm::worklet::contourtree_augmented::PrintIndices(
"BestDownSupernode", b->HierarchicalVolumetricBranchDecomposer.BestDownSupernode, -1, rs);
"BestDownSupernode", b->VolumetricBranchDecomposer.BestDownSupernode, -1, rs);
vtkm::worklet::contourtree_augmented::PrintIndices(
"BestUpVolume", b->HierarchicalVolumetricBranchDecomposer.BestUpVolume, -1, rs);
"BestUpVolume", b->VolumetricBranchDecomposer.BestUpVolume, -1, rs);
vtkm::worklet::contourtree_augmented::PrintIndices(
"BestDownVolume", b->HierarchicalVolumetricBranchDecomposer.BestDownVolume, -1, rs);
VTKM_LOG_S(this->TreeLogLevel, rs.str());
"BestDownVolume", b->VolumetricBranchDecomposer.BestDownVolume, -1, rs);
VTKM_LOG_S(vtkm::cont::LogLevel::Info, rs.str());
}
#endif
});
@ -250,6 +254,73 @@ VTKM_CONT vtkm::cont::PartitionedDataSet DistributedBranchDecompositionFilter::D
<< ": " << timer.GetElapsedTime() << " seconds" << std::endl;
timer.Start();
//// Branch decomposition stored in branch root array b->BranchRoots
branch_decomposition_master.foreach (
[&](BranchDecompositionBlock* b, const vtkmdiy::Master::ProxyWithLink&) {
//// STEP 1: Find ends of branches locally
//// STEP 1A: Find upper end of branch locally
//// Segmented sort, by branch ID, of the value of the upper node of each superarc
//// Sort superarcs by the value of the upper node, segmenting by branchID
//// Upper node determined using ascending flag of superarc array
//// NOTE: Superarc array is stored in b->HierarchicalContourTreeDataSet
//// if ascending flag is NOT set, upper node is the source node of the superarc, whose
//// supernode ID is guaranteed to be the same as the ID of the superarc
//// if ascending flag is set, upper node is the target node of the superarc, which is stored
//// in the superarc array but maskIndex must be called to strip out flags
//// Create index array with IDs of all superarcs:
//// * Size is Supernodes.Size()-1 or Superarcs.Size()-1 because of last node as NULL superarc
//// * Fill vtk-m equivalent of std::iota
//// Segmented sort of the "superarcs" array by three keys:
//// (1) branchID (most senior superarc),
//// (2) data value
//// (3) global regular id (for simulation of simplicity)
//// Find highest vertex for branch (i.e., before branchID increases), special case for
//// end of array.
//
//// Depending on the level of the block, the attachment points (if not at the highest level) or the root of the contour tree
//// have Superarcs that are always NO_SUCH_ELEMENT (NSE)
//// If attachment points, their Superparents should hold the superarc ID they attach to
//// STEP 1B: Find lower end of branch locally
//// Inverse to STEP 1A
//
//// STEP 1C: Compress out duplicate branch IDs
//// * Temporary array "knownBranches" with size of superarcs array, initialize to NO_SUCH_ELEMENT
//// * Every highest vertex we find in STEP 1A has a branch ID, use that ID to set knownBranches[bID] = bID;
//// * Remove/compress out NO_SUCH_ELEMENT entries
//// * Array now is a list of all known (to the block) branches
//
//// STEP 2: Look up (and add) global regular ID, value, and terminal volume both intrinsic and dependent
//// Target: get the information to explicitly extract the branch
//// NOTE: Both STEP 1 and STEP 2 are implemented in b->VolumetricBranchDecomposer.CollectBranches()
//// =================================================================
//// Pipeline:
//// Each block now has a list of all the branchRoot IDs;
//// convert it into a list of global regular ids for each branch;
//// obtain the value based on the local regular id;
//// dependent volume is indexed by the superarc id; however, it's the superarc id of the last superarc on the branch
//// and we don't know the direction of the superarc
//// As a result, the top supernode can either be the source or the destination of the superarc
//// and the dependent volume could be at either end
//// "IsAscending(superarc)" tells the direction of the superarc, and consequently the direction of the dependent volume
//// Therefore, we treat the highest end and lowest end as SUPERARCs rather than nodes, because superarcs carry the direction information
//// Moreover, for all branches other than the senior most, either the top end or the bottom end is a leaf, and the other end is the inner end (saddle)
//// Leaves can be detected because the dependent weight is always totalVolume(mesh)-1;
//// Senior branch will have leaves on both ends.
#ifdef DEBUG_PRINT
VTKM_LOG_S(vtkm::cont::LogLevel::Info,
"CollectBranches for local block " << b->GlobalBlockId << std::endl);
#endif
const vtkm::cont::DataSet& ds = input.GetPartition(b->LocalBlockNo);
b->VolumetricBranchDecomposer.CollectBranches(ds, b->BranchRoots);
});
// Now that we have collected the branches, we do a global reduction to exchange branch end information
// across all compute ranks
vtkmdiy::reduce(branch_decomposition_master,
assigner,
partners,
vtkm::filter::scalar_topology::internal::ExchangeBranchEndsFunctor{});
std::vector<vtkm::cont::DataSet> outputDataSets(input.GetNumberOfPartitions());
// Copy input data set to output
// TODO/FIXME: Should we really do this? Or just output branchRoots
@ -260,11 +331,22 @@ VTKM_CONT vtkm::cont::PartitionedDataSet DistributedBranchDecompositionFilter::D
outputDataSets[ds_no] = input.GetPartition(ds_no);
}
branch_decomposition_master.foreach (
[&](BranchDecompositionBlock* b, const vtkmdiy::Master::ProxyWithLink&) {
vtkm::cont::Field branchRootField(
"BranchRoots", vtkm::cont::Field::Association::WholeDataSet, b->BranchRoots);
outputDataSets[b->LocalBlockNo].AddField(branchRootField);
// Store the upper end and lower end global regular IDs of branches in the output
vtkm::cont::Field UpperEndGRIdField("UpperEndGlobalRegularIds",
vtkm::cont::Field::Association::WholeDataSet,
b->VolumetricBranchDecomposer.UpperEndGRId);
outputDataSets[b->LocalBlockNo].AddField(UpperEndGRIdField);
vtkm::cont::Field LowerEndGRIdField("LowerEndGlobalRegularIds",
vtkm::cont::Field::Association::WholeDataSet,
b->VolumetricBranchDecomposer.LowerEndGRId);
outputDataSets[b->LocalBlockNo].AddField(LowerEndGRIdField);
});
timingsStream << " " << std::setw(38) << std::left

@ -12,6 +12,7 @@ set(headers
BranchDecompositionBlock.h
ComputeBlockIndices.h
ComputeDistributedBranchDecompositionFunctor.h
ExchangeBranchEndsFunctor.h
)
#-----------------------------------------------------------------------------

@ -0,0 +1,329 @@
//============================================================================
// Copyright (c) Kitware, Inc.
// All rights reserved.
// See LICENSE.txt for details.
//
// This software is distributed WITHOUT ANY WARRANTY; without even
// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the above copyright notice for more information.
//============================================================================
// Copyright (c) 2018, The Regents of the University of California, through
// Lawrence Berkeley National Laboratory (subject to receipt of any required approvals
// from the U.S. Dept. of Energy). All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// (1) Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// (2) Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// (3) Neither the name of the University of California, Lawrence Berkeley National
// Laboratory, U.S. Dept. of Energy nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
// IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
//
//=============================================================================
//
// This code is an extension of the algorithm presented in the paper:
// Parallel Peak Pruning for Scalable SMP Contour Tree Computation.
// Hamish Carr, Gunther Weber, Christopher Sewell, and James Ahrens.
// Proceedings of the IEEE Symposium on Large Data Analysis and Visualization
// (LDAV), October 2016, Baltimore, Maryland.
//
// The PPP2 algorithm and software were jointly developed by
// Hamish Carr (University of Leeds), Gunther H. Weber (LBNL), and
// Oliver Ruebel (LBNL)
//==============================================================================
#include <vtkm/filter/scalar_topology/internal/ExchangeBranchEndsFunctor.h>
#include <vtkm/filter/scalar_topology/worklet/branch_decomposition/hierarchical_volumetric_branch_decomposer/BranchEndGlobalUpdateWorklet.h>
#include <vtkm/Types.h>
#ifdef DEBUG_PRINT
#define DEBUG_PRINT_COMBINED_BLOCK_IDS
#endif
namespace vtkm
{
namespace filter
{
namespace scalar_topology
{
namespace internal
{
void ExchangeBranchEndsFunctor::operator()(
BranchDecompositionBlock* b,
const vtkmdiy::ReduceProxy& rp, // communication proxy
const vtkmdiy::RegularSwapPartners& // partners of the current block (unused)
) const
{
// Get our rank and DIY id
const auto selfid = rp.gid();
// Aliases to reduce verbosity
auto& branchDecomposer = b->VolumetricBranchDecomposer;
using IdArrayType = vtkm::worklet::contourtree_augmented::IdArrayType;
vtkm::cont::Invoker invoke;
std::vector<int> incoming;
rp.incoming(incoming);
for (const int ingid : incoming)
{
// NOTE/IMPORTANT: In each round we should have only one swap partner (despite the for-loop here).
// If that assumption does not hold, it will break things.
// NOTE/IMPORTANT: This assumption only holds if the number of blocks is a power of two.
// Otherwise, we may need to process more than one incoming block
if (ingid != selfid)
{
#ifdef DEBUG_PRINT_COMBINED_BLOCK_IDS
int incomingGlobalBlockId;
rp.dequeue(ingid, incomingGlobalBlockId);
VTKM_LOG_S(vtkm::cont::LogLevel::Info,
"Combining local block " << b->GlobalBlockId << " with incoming block "
<< incomingGlobalBlockId);
#endif
// Receive data from swap partner
// IMPORTANT: Needs to be exact same order as enqueue later in code
IdArrayType incomingBranchRootGRId;
rp.dequeue(ingid, incomingBranchRootGRId);
IdArrayType incomingUpperEndGRId;
rp.dequeue(ingid, incomingUpperEndGRId);
IdArrayType incomingLowerEndGRId;
rp.dequeue(ingid, incomingLowerEndGRId);
vtkm::cont::UnknownArrayHandle incomingUpperEndValue;
rp.dequeue(ingid, incomingUpperEndValue);
vtkm::cont::UnknownArrayHandle incomingLowerEndValue;
rp.dequeue(ingid, incomingLowerEndValue);
IdArrayType incomingUpperEndSuperarcId;
rp.dequeue(ingid, incomingUpperEndSuperarcId);
IdArrayType incomingLowerEndSuperarcId;
rp.dequeue(ingid, incomingLowerEndSuperarcId);
IdArrayType incomingUpperEndIntrinsicVolume;
rp.dequeue(ingid, incomingUpperEndIntrinsicVolume);
IdArrayType incomingLowerEndIntrinsicVolume;
rp.dequeue(ingid, incomingLowerEndIntrinsicVolume);
IdArrayType incomingUpperEndDependentVolume;
rp.dequeue(ingid, incomingUpperEndDependentVolume);
IdArrayType incomingLowerEndDependentVolume;
rp.dequeue(ingid, incomingLowerEndDependentVolume);
/// Superarc and branch IDs are assigned based on the hierarchical level
/// Shared branches lie consecutively at the small-ID end of the branch array
/// We identify the shared branches first
/// because the arrays must have the same length to apply the worklet
IdArrayType oneIfSharedBranch;
vtkm::cont::Algorithm::Transform(
incomingBranchRootGRId, branchDecomposer.BranchRootGRId, oneIfSharedBranch, vtkm::Equal());
vtkm::Id nSharedBranches = vtkm::cont::Algorithm::Reduce(oneIfSharedBranch, 0, vtkm::Sum());
#ifdef DEBUG_PRINT_COMBINED_BLOCK_IDS
std::stringstream precheckStream;
vtkm::worklet::contourtree_augmented::PrintHeader(
branchDecomposer.BranchRoot.GetNumberOfValues(), precheckStream);
vtkm::worklet::contourtree_augmented::PrintIndices(
"SelfBranchRootGRId", branchDecomposer.BranchRootGRId, -1, precheckStream);
precheckStream << std::endl;
vtkm::worklet::contourtree_augmented::PrintHeader(incomingBranchRootGRId.GetNumberOfValues(),
precheckStream);
vtkm::worklet::contourtree_augmented::PrintIndices(
"OtherBranchRootGRId", incomingBranchRootGRId, -1, precheckStream);
precheckStream << std::endl;
if (nSharedBranches > 0)
{
vtkm::worklet::contourtree_augmented::PrintHeader(nSharedBranches, precheckStream);
vtkm::worklet::contourtree_augmented::PrintIndices(
"OneIfSharedBranch", oneIfSharedBranch, -1, precheckStream);
precheckStream << std::endl;
}
VTKM_LOG_S(vtkm::cont::LogLevel::Info, precheckStream.str());
#endif
// Now apply worklet
// The input domain is sharedBranchesIndices because its size is nSharedBranches
// Worklet task:
// 1. decide the shared upper node and lower node
// 2. update local information if necessary
vtkm::cont::ArrayHandleIndex sharedBranchesIndices(nSharedBranches);
auto resolveValueType = [&](const auto& inArray) {
using InArrayHandleType = std::decay_t<decltype(inArray)>;
using ValueType = typename InArrayHandleType::ValueType;
// Need to cast other data value arrays into known value types
auto concreteSelfLowerEndValue = vtkm::cont::make_ArrayHandleView(
branchDecomposer.LowerEndValue.AsArrayHandle<InArrayHandleType>(), 0, nSharedBranches);
auto concreteOtherLowerEndValue = vtkm::cont::make_ArrayHandleView(
incomingLowerEndValue.AsArrayHandle<InArrayHandleType>(), 0, nSharedBranches);
// We need ArrayHandleView to restrict the array to be the same size as nSharedBranches
auto selfLowerEndGRId =
vtkm::cont::make_ArrayHandleView(branchDecomposer.LowerEndGRId, 0, nSharedBranches);
auto otherLowerEndGRId =
vtkm::cont::make_ArrayHandleView(incomingLowerEndGRId, 0, nSharedBranches);
auto selfLowerEndSuperarcId =
vtkm::cont::make_ArrayHandleView(branchDecomposer.LowerEndSuperarcId, 0, nSharedBranches);
auto otherLowerEndSuperarcId =
vtkm::cont::make_ArrayHandleView(incomingLowerEndSuperarcId, 0, nSharedBranches);
auto selfLowerEndIntrinsicVolume = vtkm::cont::make_ArrayHandleView(
branchDecomposer.LowerEndIntrinsicVolume, 0, nSharedBranches);
auto otherLowerEndIntrinsicVolume =
vtkm::cont::make_ArrayHandleView(incomingLowerEndIntrinsicVolume, 0, nSharedBranches);
auto selfLowerEndDependentVolume = vtkm::cont::make_ArrayHandleView(
branchDecomposer.LowerEndDependentVolume, 0, nSharedBranches);
auto otherLowerEndDependentVolume =
vtkm::cont::make_ArrayHandleView(incomingLowerEndDependentVolume, 0, nSharedBranches);
vtkm::worklet::scalar_topology::hierarchical_volumetric_branch_decomposer::
UpdateBranchEndByExchangeWorklet<ValueType, true>
updateLowerEndWorklet;
invoke(updateLowerEndWorklet,
sharedBranchesIndices,
selfLowerEndGRId,
otherLowerEndGRId,
concreteSelfLowerEndValue,
concreteOtherLowerEndValue,
selfLowerEndSuperarcId,
otherLowerEndSuperarcId,
selfLowerEndIntrinsicVolume,
otherLowerEndIntrinsicVolume,
selfLowerEndDependentVolume,
otherLowerEndDependentVolume);
// Write the self lower end value array back to branchDecomposer.LowerEndValue:
// no need to do so explicitly, because they share the same memory
// For the upper end, branchDecomposer.UpperEndValue is already cast (it is inArray),
// so we can omit the cast step
auto concreteSelfUpperEndValue =
vtkm::cont::make_ArrayHandleView(inArray, 0, nSharedBranches);
auto concreteOtherUpperEndValue = vtkm::cont::make_ArrayHandleView(
incomingUpperEndValue.AsArrayHandle<InArrayHandleType>(), 0, nSharedBranches);
// We need ArrayHandleView to restrict the array to be the same size as nSharedBranches
auto selfUpperEndGRId =
vtkm::cont::make_ArrayHandleView(branchDecomposer.UpperEndGRId, 0, nSharedBranches);
auto otherUpperEndGRId =
vtkm::cont::make_ArrayHandleView(incomingUpperEndGRId, 0, nSharedBranches);
auto selfUpperEndSuperarcId =
vtkm::cont::make_ArrayHandleView(branchDecomposer.UpperEndSuperarcId, 0, nSharedBranches);
auto otherUpperEndSuperarcId =
vtkm::cont::make_ArrayHandleView(incomingUpperEndSuperarcId, 0, nSharedBranches);
auto selfUpperEndIntrinsicVolume = vtkm::cont::make_ArrayHandleView(
branchDecomposer.UpperEndIntrinsicVolume, 0, nSharedBranches);
auto otherUpperEndIntrinsicVolume =
vtkm::cont::make_ArrayHandleView(incomingUpperEndIntrinsicVolume, 0, nSharedBranches);
auto selfUpperEndDependentVolume = vtkm::cont::make_ArrayHandleView(
branchDecomposer.UpperEndDependentVolume, 0, nSharedBranches);
auto otherUpperEndDependentVolume =
vtkm::cont::make_ArrayHandleView(incomingUpperEndDependentVolume, 0, nSharedBranches);
vtkm::worklet::scalar_topology::hierarchical_volumetric_branch_decomposer::
UpdateBranchEndByExchangeWorklet<ValueType, false>
updateUpperEndWorklet;
invoke(updateUpperEndWorklet,
sharedBranchesIndices,
selfUpperEndGRId,
otherUpperEndGRId,
concreteSelfUpperEndValue,
concreteOtherUpperEndValue,
selfUpperEndSuperarcId,
otherUpperEndSuperarcId,
selfUpperEndIntrinsicVolume,
otherUpperEndIntrinsicVolume,
selfUpperEndDependentVolume,
otherUpperEndDependentVolume);
#ifdef DEBUG_PRINT_COMBINED_BLOCK_IDS
std::stringstream resultStream;
resultStream << "Branches After Combination (nSharedBranches = " << nSharedBranches << ")"
<< std::endl;
vtkm::worklet::contourtree_augmented::PrintHeader(
branchDecomposer.BranchRoot.GetNumberOfValues(), resultStream);
vtkm::worklet::contourtree_augmented::PrintIndices(
"BranchRoot", branchDecomposer.BranchRoot, -1, resultStream);
vtkm::worklet::contourtree_augmented::PrintIndices(
"BranchRootGRID", branchDecomposer.BranchRootGRId, -1, resultStream);
vtkm::worklet::contourtree_augmented::PrintIndices(
"UpperEndGRID", branchDecomposer.UpperEndGRId, -1, resultStream);
vtkm::worklet::contourtree_augmented::PrintIndices(
"UpperEndSuperarcID", branchDecomposer.UpperEndSuperarcId, -1, resultStream);
vtkm::worklet::contourtree_augmented::PrintIndices(
"UpperEndIntrinsicVolume", branchDecomposer.UpperEndIntrinsicVolume, -1, resultStream);
vtkm::worklet::contourtree_augmented::PrintIndices(
"UpperEndDependentVolume", branchDecomposer.UpperEndDependentVolume, -1, resultStream);
vtkm::worklet::contourtree_augmented::PrintValues(
"UpperEndValue", inArray, -1, resultStream);
vtkm::worklet::contourtree_augmented::PrintIndices(
"LowerEndGRID", branchDecomposer.LowerEndGRId, -1, resultStream);
vtkm::worklet::contourtree_augmented::PrintIndices(
"LowerEndSuperarcID", branchDecomposer.LowerEndSuperarcId, -1, resultStream);
vtkm::worklet::contourtree_augmented::PrintIndices(
"LowerEndIntrinsicVolume", branchDecomposer.LowerEndIntrinsicVolume, -1, resultStream);
vtkm::worklet::contourtree_augmented::PrintIndices(
"LowerEndDependentVolume", branchDecomposer.LowerEndDependentVolume, -1, resultStream);
vtkm::worklet::contourtree_augmented::PrintValues(
"LowerEndValue",
branchDecomposer.LowerEndValue.AsArrayHandle<InArrayHandleType>(),
-1,
resultStream);
resultStream << std::endl;
VTKM_LOG_S(vtkm::cont::LogLevel::Info, resultStream.str());
#endif
};
branchDecomposer.UpperEndValue
.CastAndCallForTypes<vtkm::TypeListScalarAll, vtkm::cont::StorageListBasic>(
resolveValueType);
}
}
for (int cc = 0; cc < rp.out_link().size(); ++cc)
{
auto target = rp.out_link().target(cc);
if (target.gid != selfid)
{
#ifdef DEBUG_PRINT_COMBINED_BLOCK_IDS
rp.enqueue(target, b->GlobalBlockId);
#endif
rp.enqueue(target, branchDecomposer.BranchRootGRId);
rp.enqueue(target, branchDecomposer.UpperEndGRId);
rp.enqueue(target, branchDecomposer.LowerEndGRId);
rp.enqueue(target, branchDecomposer.UpperEndValue);
rp.enqueue(target, branchDecomposer.LowerEndValue);
rp.enqueue(target, branchDecomposer.UpperEndSuperarcId);
rp.enqueue(target, branchDecomposer.LowerEndSuperarcId);
rp.enqueue(target, branchDecomposer.UpperEndIntrinsicVolume);
rp.enqueue(target, branchDecomposer.LowerEndIntrinsicVolume);
rp.enqueue(target, branchDecomposer.UpperEndDependentVolume);
rp.enqueue(target, branchDecomposer.LowerEndDependentVolume);
}
}
}
} // namespace internal
} // namespace scalar_topology
} // namespace filter
} // namespace vtkm
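The per-branch update rule applied by UpdateBranchEndByExchangeWorklet above lives in BranchEndGlobalUpdateWorklet.h, which is not part of this diff. Conceptually, for every shared branch each block keeps whichever of the two candidate ends is more extreme, breaking value ties by global regular ID (simulation of simplicity). A rough, hypothetical sketch of that per-branch decision, reduced to a single scalar type and a plain struct rather than the real worklet:

#include <vtkm/Types.h>

// Hypothetical illustration only; not the actual worklet or its data layout.
struct BranchEndCandidate
{
  vtkm::Id GlobalRegularId;
  vtkm::Float64 DataValue;
  vtkm::Id SuperarcId;
  vtkm::Id IntrinsicVolume;
  vtkm::Id DependentVolume;
};

// isLower == true keeps the smaller candidate (lower end of the branch),
// isLower == false keeps the larger candidate (upper end of the branch).
inline BranchEndCandidate MergeBranchEnd(const BranchEndCandidate& self,
                                         const BranchEndCandidate& other,
                                         bool isLower)
{
  const bool otherWins = isLower
    ? (other.DataValue < self.DataValue ||
       (other.DataValue == self.DataValue && other.GlobalRegularId < self.GlobalRegularId))
    : (other.DataValue > self.DataValue ||
       (other.DataValue == self.DataValue && other.GlobalRegularId > self.GlobalRegularId));
  // The winning candidate brings its superarc ID and terminal volumes along.
  return otherWins ? other : self;
}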

@ -0,0 +1,88 @@
//============================================================================
// Copyright (c) Kitware, Inc.
// All rights reserved.
// See LICENSE.txt for details.
//
// This software is distributed WITHOUT ANY WARRANTY; without even
// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the above copyright notice for more information.
//============================================================================
// Copyright (c) 2018, The Regents of the University of California, through
// Lawrence Berkeley National Laboratory (subject to receipt of any required approvals
// from the U.S. Dept. of Energy). All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// (1) Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// (2) Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// (3) Neither the name of the University of California, Lawrence Berkeley National
// Laboratory, U.S. Dept. of Energy nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
// IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
//
//=============================================================================
//
// This code is an extension of the algorithm presented in the paper:
// Parallel Peak Pruning for Scalable SMP Contour Tree Computation.
// Hamish Carr, Gunther Weber, Christopher Sewell, and James Ahrens.
// Proceedings of the IEEE Symposium on Large Data Analysis and Visualization
// (LDAV), October 2016, Baltimore, Maryland.
//
// The PPP2 algorithm and software were jointly developed by
// Hamish Carr (University of Leeds), Gunther H. Weber (LBNL), and
// Oliver Ruebel (LBNL)
//==============================================================================
#ifndef vtk_m_filter_scalar_topology_internal_ExchangeBranchEndsFunctor_h
#define vtk_m_filter_scalar_topology_internal_ExchangeBranchEndsFunctor_h
#include <vtkm/filter/scalar_topology/internal/BranchDecompositionBlock.h>
// clang-format off
VTKM_THIRDPARTY_PRE_INCLUDE
#include <vtkm/thirdparty/diy/diy.h>
VTKM_THIRDPARTY_POST_INCLUDE
// clang-format on
namespace vtkm
{
namespace filter
{
namespace scalar_topology
{
namespace internal
{
struct ExchangeBranchEndsFunctor
{
VTKM_CONT void operator()(
BranchDecompositionBlock* b,
const vtkmdiy::ReduceProxy& rp, // communication proxy
const vtkmdiy::RegularSwapPartners& // partners of the current block (unused)
) const;
};
} // namespace internal
} // namespace scalar_topology
} // namespace filter
} // namespace vtkm
#endif

@ -446,6 +446,89 @@ inline void TestContourTreeUniformDistributed8x9(int nBlocks, int rank = 0, int
}
}
inline void TestContourTreeUniformDistributedBranchDecomposition8x9(int nBlocks,
int rank = 0,
int size = 1)
{
if (rank == 0)
{
std::cout << "Testing Distributed Branch Decomposition on 2D 8x9 data set with " << nBlocks
<< " blocks." << std::endl;
}
vtkm::cont::DataSet in_ds = vtkm::cont::testing::MakeTestDataSet().Make2DUniformDataSet3();
bool augmentHierarchicalTree = true;
bool computeHierarchicalVolumetricBranchDecomposition = true;
vtkm::cont::PartitionedDataSet result =
RunContourTreeDUniformDistributed(in_ds,
"pointvar",
false,
nBlocks,
rank,
size,
augmentHierarchicalTree,
computeHierarchicalVolumetricBranchDecomposition);
if (vtkm::cont::EnvironmentTracker::GetCommunicator().rank() == 0)
{
using Edge = vtkm::worklet::contourtree_distributed::Edge;
std::vector<Edge> computed;
for (vtkm::Id ds_no = 0; ds_no < result.GetNumberOfPartitions(); ++ds_no)
{
auto ds = result.GetPartition(ds_no);
auto upperEndGRId = ds.GetField("UpperEndGlobalRegularIds")
.GetData()
.AsArrayHandle<vtkm::cont::ArrayHandle<vtkm::Id>>()
.ReadPortal();
auto lowerEndGRId = ds.GetField("LowerEndGlobalRegularIds")
.GetData()
.AsArrayHandle<vtkm::cont::ArrayHandle<vtkm::Id>>()
.ReadPortal();
vtkm::Id nBranches = upperEndGRId.GetNumberOfValues();
for (vtkm::Id branch = 0; branch < nBranches; ++branch)
{
Edge edge(upperEndGRId.Get(branch), lowerEndGRId.Get(branch));
if (std::find(computed.begin(), computed.end(), edge) == computed.end())
{
computed.push_back(edge);
}
}
}
std::vector<Edge> expected{
Edge(10, 20), Edge(23, 71), Edge(34, 24), Edge(38, 20), Edge(61, 50)
};
std::sort(computed.begin(), computed.end());
std::sort(expected.begin(), expected.end());
if (computed == expected)
{
std::cout << "Branch Decomposition: Results Match!" << std::endl;
return;
}
std::cout << "Branch Decomposition Results:" << std::endl;
std::cout << "Computed Branch Decomposition" << std::endl;
for (std::size_t i = 0; i < computed.size(); i++)
{
std::cout << std::setw(12) << computed[i].low << std::setw(14) << computed[i].high
<< std::endl;
}
std::cout << "Expected Branch Decomposition" << std::endl;
for (std::size_t i = 0; i < expected.size(); i++)
{
std::cout << std::setw(12) << expected[i].low << std::setw(14) << expected[i].high
<< std::endl;
}
VTKM_TEST_FAIL("Branch Decomposition Failed!");
}
}
inline void TestContourTreeUniformDistributed5x6x7(int nBlocks,
bool marchingCubes,
int rank = 0,

@ -58,6 +58,8 @@ using vtkm::filter::testing::contourtree_uniform_distributed::TestContourTreeFil
using vtkm::filter::testing::contourtree_uniform_distributed::
TestContourTreeUniformDistributed5x6x7;
using vtkm::filter::testing::contourtree_uniform_distributed::TestContourTreeUniformDistributed8x9;
using vtkm::filter::testing::contourtree_uniform_distributed::
TestContourTreeUniformDistributedBranchDecomposition8x9;
class TestContourTreeUniformDistributedFilter
{
@ -70,6 +72,12 @@ public:
TestContourTreeUniformDistributed8x9(4);
TestContourTreeUniformDistributed8x9(8);
TestContourTreeUniformDistributed8x9(16);
TestContourTreeUniformDistributedBranchDecomposition8x9(2);
TestContourTreeUniformDistributedBranchDecomposition8x9(4);
TestContourTreeUniformDistributedBranchDecomposition8x9(8);
TestContourTreeUniformDistributedBranchDecomposition8x9(16);
TestContourTreeUniformDistributed5x6x7(2, false);
TestContourTreeUniformDistributed5x6x7(4, false);
TestContourTreeUniformDistributed5x6x7(8, false);

@ -128,8 +128,10 @@
#include <vtkm/filter/scalar_topology/worklet/contourtree_distributed/hierarchical_contour_tree/FindSuperArcBetweenNodes.h>
// Worklets
#include <vtkm/filter/scalar_topology/worklet/branch_decomposition/hierarchical_volumetric_branch_decomposer/BranchEndComparator.h>
#include <vtkm/filter/scalar_topology/worklet/branch_decomposition/hierarchical_volumetric_branch_decomposer/CollapseBranchesPointerDoublingWorklet.h>
#include <vtkm/filter/scalar_topology/worklet/branch_decomposition/hierarchical_volumetric_branch_decomposer/CollapseBranchesWorklet.h>
#include <vtkm/filter/scalar_topology/worklet/branch_decomposition/hierarchical_volumetric_branch_decomposer/GetOuterEndWorklet.h>
#include <vtkm/filter/scalar_topology/worklet/branch_decomposition/hierarchical_volumetric_branch_decomposer/LocalBestUpDownByVolumeBestUpDownEdgeWorklet.h>
#include <vtkm/filter/scalar_topology/worklet/branch_decomposition/hierarchical_volumetric_branch_decomposer/LocalBestUpDownByVolumeInitSuperarcListWorklet.h>
#include <vtkm/filter/scalar_topology/worklet/branch_decomposition/hierarchical_volumetric_branch_decomposer/LocalBestUpDownByVolumeWorklet.h>
@ -164,6 +166,23 @@ public:
vtkm::worklet::contourtree_augmented::IdArrayType UpVolume;
vtkm::worklet::contourtree_augmented::IdArrayType DownVolume;
/// working arrays for collecting ends of branches
/// kept at class level for branch aggregation
/// Note: Intrinsic Volume and Dependent Volume are only for the superarcs at the end
vtkm::worklet::contourtree_augmented::IdArrayType BranchRoot;
vtkm::worklet::contourtree_augmented::IdArrayType BranchRootGRId;
vtkm::worklet::contourtree_augmented::IdArrayType UpperEndGRId;
vtkm::worklet::contourtree_augmented::IdArrayType LowerEndGRId;
vtkm::cont::UnknownArrayHandle UpperEndValue;
vtkm::cont::UnknownArrayHandle LowerEndValue;
vtkm::worklet::contourtree_augmented::IdArrayType UpperEndSuperarcId;
vtkm::worklet::contourtree_augmented::IdArrayType LowerEndSuperarcId;
vtkm::worklet::contourtree_augmented::IdArrayType UpperEndIntrinsicVolume;
vtkm::worklet::contourtree_augmented::IdArrayType LowerEndIntrinsicVolume;
vtkm::worklet::contourtree_augmented::IdArrayType UpperEndDependentVolume;
vtkm::worklet::contourtree_augmented::IdArrayType LowerEndDependentVolume;
/// routines to compute branch decomposition by volume
/// WARNING: we now have two types of hierarchical tree sharing a data structure:
/// I. hierarchical tree without augmentation
@ -200,6 +219,16 @@ public:
void CollapseBranches(const vtkm::cont::DataSet& hierarchicalTreeDataSet,
vtkm::worklet::contourtree_augmented::IdArrayType& branchRoot);
/// routine to find the upper node and the lower node of all branches within the local block
template <bool isLower>
void CollectEndsOfBranches(const vtkm::cont::DataSet& hierarchicalTreeDataSet,
vtkm::worklet::contourtree_augmented::IdArrayType& branchRoot);
/// routine to compress out duplicate branch IDs from the results of CollectEndsOfBranches;
/// calls CollectEndsOfBranches for both the upper end and the lower end
void CollectBranches(const vtkm::cont::DataSet& hierarchicalTreeDataSet,
vtkm::worklet::contourtree_augmented::IdArrayType& branchRoot);
/// routines to print branches
template <typename IdArrayHandleType, typename DataValueArrayHandleType>
static std::string PrintBranches(const IdArrayHandleType& hierarchicalTreeSuperarcsAH,
@ -509,6 +538,393 @@ inline void HierarchicalVolumetricBranchDecomposer::CollapseBranches(
} // CollapseBranches
//// CollectEndsOfBranches
//// Find ends of branches locally
//// STEP 1A: Find upper end of branch locally
//// Segmented sort, by branch ID, of the value of the upper node of each superarc
//// Sort superarcs by the value of the upper node, segmenting by branchID
//// Upper node determined using ascending flag of superarc array
//// NOTE: Superarc array is stored in HierarchicalTreeDataSet
//// if ascending flag is NOT set, upper node is the source node of the superarc, whose
//// supernode ID is guaranteed to be the same as the ID of the superarc
//// if ascending flag is set, upper node is the target node of the superarc, which is stored
//// in the superarc array but maskIndex must be called to strip out flags
//// Create index array with IDs of all superarcs:
//// * Size is Supernodes.Size()-1 or Superarcs.Size()-1 because of last node as NULL superarc
//// * Fill vtk-m equivalent of std::iota
//// Segmented sort of the "superarcs" array by three keys:
//// (1) branchID (most senior superarc),
//// (2) data value
//// (3) global regular id (for simulation of simplicity)
//// Find highest vertex for branch (i.e., before branchID increases), special case for
//// end of array.
//// STEP 1B: Find lower end of branch locally
//// Inverse to STEP 1A
////
//// isLower: True if we look for the lower end of branches
template <bool isLower>
inline void HierarchicalVolumetricBranchDecomposer::CollectEndsOfBranches(
const vtkm::cont::DataSet& hierarchicalTreeDataSet,
vtkm::worklet::contourtree_augmented::IdArrayType& branchRoots)
{
using IdArrayType = vtkm::worklet::contourtree_augmented::IdArrayType;
// Array superNodes stores the LOCAL regular ID of the superarc (supernode) to locate the data value
// size: nSuperarcs
auto supernodes =
hierarchicalTreeDataSet.GetField("Supernodes").GetData().AsArrayHandle<IdArrayType>();
// Array superArcs stores the target supernode of the superarc
// size: nSuperarcs
// NOTE: NSE refers to the innermost node; we will filter this node out later.
auto superarcs =
hierarchicalTreeDataSet.GetField("Superarcs").GetData().AsArrayHandle<IdArrayType>();
vtkm::Id nSuperarcs = superarcs.GetNumberOfValues();
// data value in UnknownArrayHandle
// size: nVertices
auto dataValues = hierarchicalTreeDataSet.GetField("DataValues").GetData();
// global regular IDs are used for simulation of simplicity to break ties
// size: nVertices
auto globalRegularIds =
hierarchicalTreeDataSet.GetField("RegularNodeGlobalIds").GetData().AsArrayHandle<IdArrayType>();
auto intrinsicVolumes =
hierarchicalTreeDataSet.GetField("IntrinsicVolume").GetData().AsArrayHandle<IdArrayType>();
auto dependentVolumes =
hierarchicalTreeDataSet.GetField("DependentVolume").GetData().AsArrayHandle<IdArrayType>();
#ifdef DEBUG_HIERARCHICAL_VOLUMETRIC_BRANCH_DECOMPOSER
if (isLower)
{
IdArrayType superarcGRId;
vtkm::worklet::contourtree_augmented::PermuteArray<vtkm::Id>(
globalRegularIds, supernodes, superarcGRId);
std::stringstream resultStream;
resultStream << "All Information In The Block" << std::endl;
vtkm::worklet::contourtree_augmented::PrintHeader(nSuperarcs, resultStream);
vtkm::worklet::contourtree_augmented::PrintIndices("Superarcs", superarcs, -1, resultStream);
vtkm::worklet::contourtree_augmented::PrintIndices("Supernodes", supernodes, -1, resultStream);
vtkm::worklet::contourtree_augmented::PrintIndices(
"Regular IDs", superarcGRId, -1, resultStream);
auto resolveOutput = [&](const auto& inArray) {
using InArrayHandleType = std::decay_t<decltype(inArray)>;
using ValueType = typename InArrayHandleType::ValueType;
vtkm::cont::ArrayHandle<ValueType> superarcValue;
vtkm::worklet::contourtree_augmented::PermuteArray<ValueType>(
inArray, supernodes, superarcValue);
vtkm::worklet::contourtree_augmented::PrintValues(
"Data Values", superarcValue, -1, resultStream);
};
dataValues.CastAndCallForTypes<vtkm::TypeListScalarAll, vtkm::cont::StorageListBasic>(
resolveOutput);
vtkm::worklet::contourtree_augmented::PrintIndices(
"Intrinsic Volumes", intrinsicVolumes, -1, resultStream);
vtkm::worklet::contourtree_augmented::PrintIndices(
"Dependent Volumes", dependentVolumes, -1, resultStream);
resultStream << std::endl;
VTKM_LOG_S(vtkm::cont::LogLevel::Info, resultStream.str());
}
#endif
// Get the outer end of all superarcs
// Pseudo-code of the worklet GetSuperarcOuterNodeWorklet in serial:
// ** Initialize array for outerNodes and its portal **
// for (vtkm::Id i=0; i<nSuperarcs; i++){
// if (NoSuchElement(superarcs[i])){
// outerNodes[i] = NO_SUCH_ELEMENT;
// continue;
// }
//
// bool ascendingSuperarc = IsAscending(superarcs[i]);
// if (ascendingSuperarc ^ isLower){
// vtkm::Id superarcTo = MaskedIndex(superarcs[i]);
// outerNodes[i] = superarcTo;
// }
// else{
// outerNodes[i] = i;
// }
// }
// Other masked arrays: Hyperarcs, (Superarcs), Arcs, Hyperparents, Superparents
// Rule of thumb:
// 1. any arc/parent arrays can have ascending flag information
// 2. always assume flag information on everything unless proved otherwise
// NOTE: NSE is always a flag on everything
vtkm::cont::ArrayHandleIndex superarcIndices(nSuperarcs);
IdArrayType outerNodes;
outerNodes.Allocate(nSuperarcs);
// boolean parameter determines whether looking for the lower end of the superarc or not
vtkm::worklet::scalar_topology::hierarchical_volumetric_branch_decomposer::
GetSuperarcOuterNodeWorklet<isLower>
getSuperarcOuterNodeWorklet;
vtkm::cont::Invoker invoke;
invoke(getSuperarcOuterNodeWorklet, // worklet
superarcIndices, // input
superarcs, // input
outerNodes); // output
// create a list of the non-virtual superarcs (all superarcs except the most senior one)
IdArrayType actualSuperarcs;
// fill it up with index values [0, 1, 2 ... nSuperarcs-1]
// while keeping only those indices where the Superarc is not NSE.
vtkm::cont::Algorithm::CopyIf(superarcIndices, // input
superarcs, // stencil
actualSuperarcs, // output target array
vtkm::worklet::contourtree_augmented::NotNoSuchElementPredicate{});
vtkm::Id nActualSuperarcs = actualSuperarcs.GetNumberOfValues();
// Get the branch Id, data value and global regular ID for each actual superarc to be sorted
// i.e., the data value and the regular ID of the OUTER node of the superarc
// Pseudo-code in serial (no explicit flag removal process):
// for (vtkm::Id i=0; i<nActualSuperarcs; i++)
// {
// actualBranchRoots[i] = branchRoots[actualSuperarcs[i]];
// actualOuterNodeValues[i] = dataValues[supernodes[outerNodes[actualSuperarcs[i]]]];
// actualOuterNodeRegularIds[i] = globalRegularIds[supernodes[outerNodes[actualSuperarcs[i]]]];
// }
// Solution: PermuteArray helps allocate the space so no need for explicit allocation
// PermuteArray also calls MaskedIndex
// IdArrayType, size: nActualSuperarcs
IdArrayType actualBranchRoots;
vtkm::worklet::contourtree_augmented::PermuteArray<vtkm::Id>(
branchRoots, actualSuperarcs, actualBranchRoots);
// IdArrayType, size: nActualSuperarcs
IdArrayType actualOuterNodes;
vtkm::worklet::contourtree_augmented::PermuteArray<vtkm::Id>(
outerNodes, actualSuperarcs, actualOuterNodes);
// IdArrayType, size: nActualSuperarcs
IdArrayType actualOuterNodeLocalIds;
vtkm::worklet::contourtree_augmented::PermuteArray<vtkm::Id>(
supernodes, actualOuterNodes, actualOuterNodeLocalIds);
// IdArrayType, size: nActualSuperarcs
IdArrayType actualOuterNodeRegularIds;
vtkm::worklet::contourtree_augmented::PermuteArray<vtkm::Id>(
globalRegularIds, actualOuterNodeLocalIds, actualOuterNodeRegularIds);
auto resolveArray = [&](const auto& inArray) {
using InArrayHandleType = std::decay_t<decltype(inArray)>;
using ValueType = typename InArrayHandleType::ValueType;
// Sort all superarcs based on the key in order
// (1) branchID (most senior superarc),
// (2) data value
// (3) global regular id (for simulation of simplicity)
// ValueArrayType
auto actualOuterNodeValues =
vtkm::cont::make_ArrayHandlePermutation(actualOuterNodeLocalIds, inArray);
vtkm::cont::ArrayHandleIndex actualSuperarcsIdx(nActualSuperarcs);
// IdArrayType, size: nActualSuperarcs, value range: [0, nActualSuperarcs-1]
// This array is ONLY used for sorting
// NOTE: To be distinguished from actualSuperarcs, whose value range is [0, nSuperarcs-1]
IdArrayType sortedSuperarcs;
vtkm::cont::Algorithm::Copy(actualSuperarcsIdx, sortedSuperarcs);
vtkm::worklet::scalar_topology::hierarchical_volumetric_branch_decomposer::
BranchEndComparator<ValueType, isLower>
branchEndComparator(actualBranchRoots, actualOuterNodeValues, actualOuterNodeRegularIds);
vtkm::cont::Algorithm::Sort(sortedSuperarcs, branchEndComparator);
/// Permute the branch roots and global regular IDs based on the sorted order
/// Then segment selection: pick the last element for each consecutive segment of BranchRoots
/// Solution: mark the last element as 1 in a 01 array
// This is the real superarc local ID after permutation
IdArrayType permutedActualSuperarcs;
vtkm::worklet::contourtree_augmented::PermuteArray<vtkm::Id>(
actualSuperarcs, sortedSuperarcs, permutedActualSuperarcs);
#ifdef DEBUG_HIERARCHICAL_VOLUMETRIC_BRANCH_DECOMPOSER
{
std::stringstream resultStream;
resultStream << "Sorted Actual Superarcs" << std::endl;
vtkm::worklet::contourtree_augmented::PrintHeader(nActualSuperarcs, resultStream);
vtkm::worklet::contourtree_augmented::PrintIndices(
"actualSortSuperarcs", sortedSuperarcs, -1, resultStream);
vtkm::worklet::contourtree_augmented::PrintIndices(
"actualSuperarcs", permutedActualSuperarcs, -1, resultStream);
resultStream << std::endl;
VTKM_LOG_S(vtkm::cont::LogLevel::Info, resultStream.str());
}
#endif
// NOTE: permutedSuperarcsTo stores the superarcTo information.
// It should only be used to determine the direction of the superarc
auto permutedSuperarcsTo =
vtkm::cont::make_ArrayHandlePermutation(permutedActualSuperarcs, superarcs);
// The following arrays are permuted into the sorted order
auto permutedBranchRoots =
vtkm::cont::make_ArrayHandlePermutation(sortedSuperarcs, actualBranchRoots);
auto permutedRegularIds =
vtkm::cont::make_ArrayHandlePermutation(sortedSuperarcs, actualOuterNodeRegularIds);
auto permutedDataValues =
vtkm::cont::make_ArrayHandlePermutation(sortedSuperarcs, actualOuterNodeValues);
auto permutedIntrinsicVolumes =
vtkm::cont::make_ArrayHandlePermutation(permutedActualSuperarcs, intrinsicVolumes);
auto permutedDependentVolumes =
vtkm::cont::make_ArrayHandlePermutation(permutedActualSuperarcs, dependentVolumes);
vtkm::worklet::scalar_topology::hierarchical_volumetric_branch_decomposer::OneIfBranchEndWorklet
oneIfBranchEndWorklet;
IdArrayType oneIfBranchEnd;
oneIfBranchEnd.Allocate(nActualSuperarcs);
invoke(oneIfBranchEndWorklet, // worklet
actualSuperarcsIdx, // input
permutedBranchRoots, // whole array input, need to check the neighbor information
oneIfBranchEnd // output
);
IdArrayType actualDirectedSuperarcs;
actualDirectedSuperarcs.Allocate(nActualSuperarcs);
vtkm::worklet::scalar_topology::hierarchical_volumetric_branch_decomposer::
CopyArcDirectionWorklet copyArcDirectionWorklet;
invoke(copyArcDirectionWorklet,
permutedActualSuperarcs,
permutedSuperarcsTo,
actualDirectedSuperarcs);
// For all branch roots, we need their global regular IDs for communication
// Pseudo-code:
// for (vtkm::Id i=0; i<nBranches; i++)
// branchRootGRIds[i] = globalRegularIds[supernode[permutedBranchRoots[i]]]
auto branchRootRegIds =
vtkm::cont::make_ArrayHandlePermutation(permutedBranchRoots, supernodes);
auto branchRootGRIds =
vtkm::cont::make_ArrayHandlePermutation(branchRootRegIds, globalRegularIds);
// We only keep the end of the branch in the arrays for future processing;
// each branch in the block stores exactly one entry
// We keep the following information
// (1) Branch ID (senior-most superarc ID), and its global regular ID
// (2) Superarc ID on both ends of the branch
// (3) Global regular ID and data value of supernodes at the branch ends
// (4) Intrinsic / Dependent volume of superarcs at the branch ends
if (isLower)
{
vtkm::cont::Algorithm::CopyIf(permutedBranchRoots, oneIfBranchEnd, this->BranchRoot);
vtkm::cont::Algorithm::CopyIf(branchRootGRIds, oneIfBranchEnd, this->BranchRootGRId);
vtkm::cont::Algorithm::CopyIf(
actualDirectedSuperarcs, oneIfBranchEnd, this->LowerEndSuperarcId);
vtkm::cont::Algorithm::CopyIf(permutedRegularIds, oneIfBranchEnd, this->LowerEndGRId);
vtkm::cont::Algorithm::CopyIf(
permutedIntrinsicVolumes, oneIfBranchEnd, this->LowerEndIntrinsicVolume);
vtkm::cont::Algorithm::CopyIf(
permutedDependentVolumes, oneIfBranchEnd, this->LowerEndDependentVolume);
InArrayHandleType lowerEndValue;
vtkm::cont::Algorithm::CopyIf(permutedDataValues, oneIfBranchEnd, lowerEndValue);
this->LowerEndValue = lowerEndValue;
}
else
{
// VERIFICATION: We assume that the lower end has already been computed
// (see HierarchicalVolumetricBranchDecomposer::CollectBranches() for the call order)
// We already have the unique branch IDs along with their branch lower ends,
// so BranchRoot should be in the same order as UpperBranchRoot
// We do a sanity check here; this part should be removed upon release
{
IdArrayType UpperBranchRoot;
vtkm::cont::Algorithm::CopyIf(permutedBranchRoots, oneIfBranchEnd, UpperBranchRoot);
bool identical =
this->BranchRoot.GetNumberOfValues() == UpperBranchRoot.GetNumberOfValues();
if (identical)
{
vtkm::cont::ArrayHandle<bool> branchRootIdentical;
vtkm::cont::Algorithm::Transform(
this->BranchRoot, UpperBranchRoot, branchRootIdentical, vtkm::Equal());
identical = vtkm::cont::Algorithm::Reduce(branchRootIdentical, true, vtkm::LogicalAnd());
}
if (!identical)
{
VTKM_LOG_S(vtkm::cont::LogLevel::Error,
"Two reduced BranchRoot arrays are not identical!");
}
}
vtkm::cont::Algorithm::CopyIf(branchRootGRIds, oneIfBranchEnd, this->BranchRootGRId);
vtkm::cont::Algorithm::CopyIf(
actualDirectedSuperarcs, oneIfBranchEnd, this->UpperEndSuperarcId);
vtkm::cont::Algorithm::CopyIf(permutedRegularIds, oneIfBranchEnd, this->UpperEndGRId);
vtkm::cont::Algorithm::CopyIf(
permutedIntrinsicVolumes, oneIfBranchEnd, this->UpperEndIntrinsicVolume);
vtkm::cont::Algorithm::CopyIf(
permutedDependentVolumes, oneIfBranchEnd, this->UpperEndDependentVolume);
InArrayHandleType upperEndValue;
vtkm::cont::Algorithm::CopyIf(permutedDataValues, oneIfBranchEnd, upperEndValue);
this->UpperEndValue = upperEndValue;
}
};
dataValues.CastAndCallForTypes<vtkm::TypeListScalarAll, vtkm::cont::StorageListBasic>(
resolveArray);
#ifdef DEBUG_HIERARCHICAL_VOLUMETRIC_BRANCH_DECOMPOSER
std::stringstream resultStream;
const std::string lowerStr = isLower ? "Lower" : "Upper";
resultStream << "Actual Branches With " << lowerStr << " Ends In The Block" << std::endl;
const IdArrayType& printBranchEndRegularId = isLower ? this->LowerEndGRId : this->UpperEndGRId;
const IdArrayType& printBranchEndSuperarcId =
isLower ? this->LowerEndSuperarcId : this->UpperEndSuperarcId;
const IdArrayType& printBranchEndIntrinsicVolume =
isLower ? this->LowerEndIntrinsicVolume : this->UpperEndIntrinsicVolume;
const IdArrayType& printBranchEndDependentVolume =
isLower ? this->LowerEndDependentVolume : this->UpperEndDependentVolume;
const vtkm::Id nBranches = this->BranchRoot.GetNumberOfValues();
vtkm::worklet::contourtree_augmented::PrintHeader(nBranches, resultStream);
vtkm::worklet::contourtree_augmented::PrintIndices(
"BranchRoot", this->BranchRoot, -1, resultStream);
vtkm::worklet::contourtree_augmented::PrintIndices(
"BranchRootRegularId", this->BranchRootGRId, -1, resultStream);
vtkm::worklet::contourtree_augmented::PrintIndices(
"BranchEndSuperarcId", printBranchEndSuperarcId, -1, resultStream);
vtkm::worklet::contourtree_augmented::PrintIndices(
"BranchEndRegularId", printBranchEndRegularId, -1, resultStream);
vtkm::worklet::contourtree_augmented::PrintIndices(
"BranchEndIntrinsicVolume", printBranchEndIntrinsicVolume, -1, resultStream);
vtkm::worklet::contourtree_augmented::PrintIndices(
"BranchEndDependentVolume", printBranchEndDependentVolume, -1, resultStream);
resultStream << std::endl;
VTKM_LOG_S(vtkm::cont::LogLevel::Info, resultStream.str());
#endif
} // CollectEndsOfBranches
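The GetSuperarcOuterNodeWorklet invoked above is only described by the pseudo-code comment; its implementation lives in GetOuterEndWorklet.h, which is not shown in this diff. A hypothetical standalone sketch of such a worklet, assuming the usual contourtree_augmented helpers, could look like this:

#include <vtkm/Types.h>
#include <vtkm/filter/scalar_topology/worklet/contourtree_augmented/Types.h>
#include <vtkm/worklet/WorkletMapField.h>

// Sketch only; the real worklet is defined in GetOuterEndWorklet.h.
template <bool isLower>
class GetSuperarcOuterNodeWorkletSketch : public vtkm::worklet::WorkletMapField
{
public:
  using ControlSignature = void(FieldIn superarcId, FieldIn superarcTo, FieldOut outerNode);
  using ExecutionSignature = void(_1, _2, _3);

  VTKM_EXEC void operator()(const vtkm::Id& superarcId,
                            const vtkm::Id& superarcTo,
                            vtkm::Id& outerNode) const
  {
    namespace cta = vtkm::worklet::contourtree_augmented;
    // The most senior superarc carries NO_SUCH_ELEMENT; pass the flag through.
    if (cta::NoSuchElement(superarcTo))
    {
      outerNode = cta::NO_SUCH_ELEMENT;
      return;
    }
    // If the superarc points towards the end we are looking for, take its
    // (masked) target; otherwise the superarc's own supernode is the outer node.
    if (cta::IsAscending(superarcTo) != isLower)
    {
      outerNode = cta::MaskedIndex(superarcTo);
    }
    else
    {
      outerNode = superarcId;
    }
  }
};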
//// CollectBranches
//// Step 1A + 1B. Call CollectEndsOfBranches to find ends of branches locally
//// STEP 1C: Compress out duplicate branch IDs
//// * Temporary array "knownBranches" with size of superarcs array, initialize to NO_SUCH_ELEMENT
//// * Every highest vertex we find in STEP 1A has a branch ID, use that ID to set knownBranches[bID] = bID;
//// * Remove/compress out NO_SUCH_ELEMENT entries
//// * Array now is a list of all known (to the block) branches
//// STEP 2: Look up (and add) global regular ID, value, and terminal volume both intrinsic and dependent
//// Target: get the information to explicitly extract the branch
//// NOTE: Both STEP 1 and STEP 2 are implemented in HierarchicalVolumetricBranchDecomposer::CollectBranches()
inline void HierarchicalVolumetricBranchDecomposer::CollectBranches(
const vtkm::cont::DataSet& hierarchicalTreeDataSet,
vtkm::worklet::contourtree_augmented::IdArrayType& branchRoot)
{
// The order of these two calls matters;
// see the comment marked "VERIFICATION" above
// Step 1B + 1C + 2: Collect the lower node of all branches in the block
this->CollectEndsOfBranches<true>(hierarchicalTreeDataSet, branchRoot);
// Step 1A + 1C + 2: Collect the upper node of all branches in the block
this->CollectEndsOfBranches<false>(hierarchicalTreeDataSet, branchRoot);
}
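In the code above, the STEP 1C compression described in the comment is folded into CollectEndsOfBranches (the CopyIf calls on the branch-end stencil). For illustration, the knownBranches formulation from the comment could be written directly with the same helpers; a hedged sketch, assuming a hypothetical knownBranches array already filled with bID at position bID for every branch end found in STEP 1A/1B and NO_SUCH_ELEMENT everywhere else:

#include <vtkm/cont/Algorithm.h>
#include <vtkm/filter/scalar_topology/worklet/contourtree_augmented/Types.h>

// Hypothetical helper illustrating STEP 1C only; not part of this merge request.
inline vtkm::worklet::contourtree_augmented::IdArrayType CompressKnownBranches(
  const vtkm::worklet::contourtree_augmented::IdArrayType& knownBranches)
{
  vtkm::worklet::contourtree_augmented::IdArrayType compressedBranches;
  // Keep only entries that are not NO_SUCH_ELEMENT; the result lists every
  // branch known to this block exactly once.
  vtkm::cont::Algorithm::CopyIf(
    knownBranches,                                                      // input
    knownBranches,                                                      // stencil
    compressedBranches,                                                 // output
    vtkm::worklet::contourtree_augmented::NotNoSuchElementPredicate{}); // predicate
  return compressedBranches;
}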
// PrintBranches
// we want to dump out the branches as viewed by this rank.
// most of the processing will be external, so we keep this simple.
@ -609,6 +1025,7 @@ inline std::string HierarchicalVolumetricBranchDecomposer::PrintBranches(
} // PrintBranches
// debug routine
inline std::string HierarchicalVolumetricBranchDecomposer::DebugPrint(std::string message,
const char* fileName,

@ -0,0 +1,202 @@
//============================================================================
// Copyright (c) Kitware, Inc.
// All rights reserved.
// See LICENSE.txt for details.
//
// This software is distributed WITHOUT ANY WARRANTY; without even
// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the above copyright notice for more information.
//============================================================================
// Copyright (c) 2018, The Regents of the University of California, through
// Lawrence Berkeley National Laboratory (subject to receipt of any required approvals
// from the U.S. Dept. of Energy). All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// (1) Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// (2) Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// (3) Neither the name of the University of California, Lawrence Berkeley National
// Laboratory, U.S. Dept. of Energy nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
// IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
//
//=============================================================================
//
// This code is an extension of the algorithm presented in the paper:
// Parallel Peak Pruning for Scalable SMP Contour Tree Computation.
// Hamish Carr, Gunther Weber, Christopher Sewell, and James Ahrens.
// Proceedings of the IEEE Symposium on Large Data Analysis and Visualization
// (LDAV), October 2016, Baltimore, Maryland.
//
// The PPP2 algorithm and software were jointly developed by
// Hamish Carr (University of Leeds), Gunther H. Weber (LBNL), and
// Oliver Ruebel (LBNL)
//==============================================================================
#ifndef vtk_m_filter_scalar_topology_worklet_branch_decomposition_hierarchical_volumetric_branch_decomposer_branch_end_comparator_h
#define vtk_m_filter_scalar_topology_worklet_branch_decomposition_hierarchical_volumetric_branch_decomposer_branch_end_comparator_h
#include <vtkm/Types.h>
#include <vtkm/cont/ArrayHandle.h>
#include <vtkm/cont/ExecutionObjectBase.h>
#include <vtkm/filter/scalar_topology/worklet/contourtree_augmented/Types.h>
namespace vtkm
{
namespace worklet
{
namespace scalar_topology
{
namespace hierarchical_volumetric_branch_decomposer
{
using IdArrayType = vtkm::worklet::contourtree_augmented::IdArrayType;
// Comparator used to sort superarcs by branch ID, then by data value, then by
// global regular ID, so that the sought branch end lands last within each branch
template <typename ValueType, bool isLower>
class BranchEndComparatorImpl
{
public:
using ValueArrayType = typename vtkm::cont::ArrayHandle<ValueType>;
using ValuePermutationType =
typename vtkm::cont::ArrayHandlePermutation<IdArrayType, ValueArrayType>;
using IdPortalType = typename IdArrayType::ReadPortalType;
using ValuePortalType = typename ValuePermutationType::ReadPortalType;
// constructor
VTKM_CONT
BranchEndComparatorImpl(const IdArrayType& branchRoots,
const ValuePermutationType& dataValues,
const IdArrayType& globalRegularIds,
vtkm::cont::DeviceAdapterId device,
vtkm::cont::Token& token)
: branchRootsPortal(branchRoots.PrepareForInput(device, token))
, dataValuesPortal(dataValues.PrepareForInput(device, token))
, globalRegularIdsPortal(globalRegularIds.PrepareForInput(device, token))
{ // constructor
} // constructor
// () operator - gets called to do comparison
VTKM_EXEC
bool operator()(const vtkm::Id& i, const vtkm::Id& j) const
{ // operator()
vtkm::Id branchI =
vtkm::worklet::contourtree_augmented::MaskedIndex(this->branchRootsPortal.Get(i));
vtkm::Id branchJ =
vtkm::worklet::contourtree_augmented::MaskedIndex(this->branchRootsPortal.Get(j));
// primary sort on branch ID
if (branchI < branchJ)
{
return true;
}
if (branchJ < branchI)
{
return false;
}
ValueType valueI = this->dataValuesPortal.Get(i);
ValueType valueJ = this->dataValuesPortal.Get(j);
// secondary sort on data value
// if isLower is false, lower value first
// if isLower is true, higher value first
if (((!isLower) && (valueI < valueJ)) || ((isLower) && (valueI > valueJ)))
{
return true;
}
if (((!isLower) && (valueI > valueJ)) || ((isLower) && (valueI < valueJ)))
{
return false;
}
vtkm::Id idI =
vtkm::worklet::contourtree_augmented::MaskedIndex(this->globalRegularIdsPortal.Get(i));
vtkm::Id idJ =
vtkm::worklet::contourtree_augmented::MaskedIndex(this->globalRegularIdsPortal.Get(j));
// third sort on global regular id
// if isLower is false, lower value first
// if isLower is true, higher value first
if (((!isLower) && (idI < idJ)) || ((isLower) && (idI > idJ)))
{
return true;
}
if (((!isLower) && (idI > idJ)) || ((isLower) && (idI < idJ)))
{
return false;
}
// fallback just in case
return false;
} // operator()
private:
IdPortalType branchRootsPortal;
ValuePortalType dataValuesPortal;
IdPortalType globalRegularIdsPortal;
}; // BranchEndComparatorImpl
/// <summary>
/// Sort comparator for superarcs to determine the upper/lower end of branches
/// </summary>
/// <typeparam name="ValueType">data value type</typeparam>
/// <typeparam name="isLower">true if we look for the lower end</typeparam>
template <typename ValueType, bool isLower>
class BranchEndComparator : public vtkm::cont::ExecutionObjectBase
{
public:
using ValueArrayType = typename vtkm::cont::ArrayHandle<ValueType>;
using ValuePermutationType =
typename vtkm::cont::ArrayHandlePermutation<IdArrayType, ValueArrayType>;
// constructor
VTKM_CONT
BranchEndComparator(const IdArrayType& branchRoots,
const ValuePermutationType& dataValues,
const IdArrayType& globalRegularIds)
: BranchRoots(branchRoots)
, DataValues(dataValues)
, GlobalRegularIds(globalRegularIds)
{
}
VTKM_CONT BranchEndComparatorImpl<ValueType, isLower> PrepareForExecution(
vtkm::cont::DeviceAdapterId device,
vtkm::cont::Token& token) const
{
return BranchEndComparatorImpl<ValueType, isLower>(
this->BranchRoots, this->DataValues, this->GlobalRegularIds, device, token);
}
private:
IdArrayType BranchRoots;
ValuePermutationType DataValues;
IdArrayType GlobalRegularIds;
}; // BranchEndComparator
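// Illustrative usage sketch (hypothetical array names; ValueType is the block's
// data value type; assumes vtkm::cont::Algorithm::Sort accepts the
// ExecutionObjectBase comparator and prepares it for the active device):
//
//   BranchEndComparator<ValueType, /*isLower=*/true> lowerEndComparator(
//     branchRoots, permutedDataValues, globalRegularIds);
//   vtkm::cont::Algorithm::Sort(superarcSortIndices, lowerEndComparator);
//
// After the sort, the superarcs of each branch are contiguous and the last
// entry per branch is that branch's lower end (higher values sort first, so the
// lowest value, with ties broken by global regular ID, lands last).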
} // namespace hierarchical_volumetric_branch_decomposer
} // namespace scalar_topology
} // namespace worklet
} // namespace vtkm
#endif

@ -0,0 +1,141 @@
//============================================================================
// Copyright (c) Kitware, Inc.
// All rights reserved.
// See LICENSE.txt for details.
//
// This software is distributed WITHOUT ANY WARRANTY; without even
// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the above copyright notice for more information.
//============================================================================
// Copyright (c) 2018, The Regents of the University of California, through
// Lawrence Berkeley National Laboratory (subject to receipt of any required approvals
// from the U.S. Dept. of Energy). All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// (1) Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// (2) Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// (3) Neither the name of the University of California, Lawrence Berkeley National
// Laboratory, U.S. Dept. of Energy nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
// IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
//
//=============================================================================
//
// This code is an extension of the algorithm presented in the paper:
// Parallel Peak Pruning for Scalable SMP Contour Tree Computation.
// Hamish Carr, Gunther Weber, Christopher Sewell, and James Ahrens.
// Proceedings of the IEEE Symposium on Large Data Analysis and Visualization
// (LDAV), October 2016, Baltimore, Maryland.
//
// The PPP2 algorithm and software were jointly developed by
// Hamish Carr (University of Leeds), Gunther H. Weber (LBNL), and
// Oliver Ruebel (LBNL)
//==============================================================================
#ifndef vtk_m_filter_scalar_topology_worklet_branch_decomposition_hierarchical_volumetric_branch_decomposer_branch_end_global_update_h
#define vtk_m_filter_scalar_topology_worklet_branch_decomposition_hierarchical_volumetric_branch_decomposer_branch_end_global_update_h
#include <vtkm/worklet/WorkletMapField.h>
namespace vtkm
{
namespace worklet
{
namespace scalar_topology
{
namespace hierarchical_volumetric_branch_decomposer
{
/// Worklet to update the branch-end information on a local block
/// by comparing it with the same branch on neighboring blocks
template <typename ValueType, bool isLower>
class UpdateBranchEndByExchangeWorklet : public vtkm::worklet::WorkletMapField
{
public:
using ControlSignature = void(FieldIn sharedBranchID, // (input)
FieldInOut selfEndGRID, // (input/output)
FieldIn incomingEndGRID, // (input)
FieldInOut selfEndValue, // (input/output)
FieldIn incomingEndValue, // (input)
FieldInOut selfEndSuperarcID, // (input/output)
FieldIn incomingEndSuperarcID, // (input)
FieldInOut selfEndIntrinsicVolume, // (input/output)
FieldIn incomingEndIntrinsicVolume, // (input)
FieldInOut selfEndDependentVolume, // (input/output)
FieldIn incomingEndDependentVolume // (input)
);
/// Constructor, empty
VTKM_EXEC_CONT
UpdateBranchEndByExchangeWorklet() {}
/// The functor checks whether the incoming end is a better branch end than the local (self) one.
/// If so, it updates all information for the local end;
/// otherwise it does nothing.
VTKM_EXEC void operator()(const vtkm::Id& sharedBranchID,
vtkm::Id& selfEndGRID,
const vtkm::Id& incomingEndGRID,
ValueType& selfEndValue,
const ValueType& incomingEndValue,
vtkm::Id& selfEndSuperarcID,
const vtkm::Id& incomingEndSuperarcID,
vtkm::Id& selfEndIntrinsicVolume,
const vtkm::Id& incomingEndIntrinsicVolume,
vtkm::Id& selfEndDependentVolume,
const vtkm::Id& incomingEndDependentVolume) const
{
// sharedBranchID is only used as an index anchor for all shared branches.
// We don't use its content.
if (selfEndGRID == incomingEndGRID)
return;
// isLower == True: if self is lower than incoming, do nothing
// isLower == False: if self is higher than incoming, do nothing
if (isLower &&
(selfEndValue < incomingEndValue ||
(selfEndValue == incomingEndValue && selfEndGRID < incomingEndGRID)))
return;
if (!isLower &&
(selfEndValue > incomingEndValue ||
(selfEndValue == incomingEndValue && selfEndGRID > incomingEndGRID)))
return;
// Time to update
selfEndGRID = incomingEndGRID;
selfEndValue = incomingEndValue;
selfEndSuperarcID = incomingEndSuperarcID;
selfEndIntrinsicVolume = incomingEndIntrinsicVolume;
selfEndDependentVolume = incomingEndDependentVolume;
// Prevent unused parameter warning
(void)sharedBranchID;
}
}; // UpdateBranchEndByExchangeWorklet
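// Illustrative dispatch sketch (hypothetical array names; ValueType is the
// block's data value type; assumes <vtkm/cont/Invoker.h> is included). One
// invocation folds the incoming block's lower-end candidates into the local
// ones, branch by branch:
//
//   vtkm::cont::Invoker invoke;
//   invoke(UpdateBranchEndByExchangeWorklet<ValueType, /*isLower=*/true>{},
//          sharedBranchIds,
//          selfLowerEndGRIds, incomingLowerEndGRIds,
//          selfLowerEndValues, incomingLowerEndValues,
//          selfLowerEndSuperarcIds, incomingLowerEndSuperarcIds,
//          selfLowerEndIntrinsicVolumes, incomingLowerEndIntrinsicVolumes,
//          selfLowerEndDependentVolumes, incomingLowerEndDependentVolumes);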
} // namespace hierarchical_volumetric_branch_decomposer
} // namespace scalar_topology
} // namespace worklet
} // namespace vtkm
#endif

@ -9,9 +9,12 @@
##============================================================================
set(headers
BranchEndComparator.h
BranchEndGlobalUpdateWorklet.h
CollapseBranchesPointerDoublingWorklet.h
CollapseBranchesWorklet.h
FindBestSupernodeWorklet.h
GetOuterEndWorklet.h
LocalBestUpDownByVolumeBestUpDownEdgeWorklet.h
LocalBestUpDownByVolumeInitSuperarcListWorklet.h
LocalBestUpDownByVolumeWorklet.h

@ -0,0 +1,176 @@
//============================================================================
// Copyright (c) Kitware, Inc.
// All rights reserved.
// See LICENSE.txt for details.
//
// This software is distributed WITHOUT ANY WARRANTY; without even
// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the above copyright notice for more information.
//============================================================================
// Copyright (c) 2018, The Regents of the University of California, through
// Lawrence Berkeley National Laboratory (subject to receipt of any required approvals
// from the U.S. Dept. of Energy). All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// (1) Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// (2) Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// (3) Neither the name of the University of California, Lawrence Berkeley National
// Laboratory, U.S. Dept. of Energy nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
// IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
//
//=============================================================================
//
// This code is an extension of the algorithm presented in the paper:
// Parallel Peak Pruning for Scalable SMP Contour Tree Computation.
// Hamish Carr, Gunther Weber, Christopher Sewell, and James Ahrens.
// Proceedings of the IEEE Symposium on Large Data Analysis and Visualization
// (LDAV), October 2016, Baltimore, Maryland.
//
// The PPP2 algorithm and software were jointly developed by
// Hamish Carr (University of Leeds), Gunther H. Weber (LBNL), and
// Oliver Ruebel (LBNL)
//==============================================================================
#ifndef vtk_m_filter_scalar_topology_worklet_branch_decomposition_hierarchical_volumetric_branch_decomposer_get_outer_end_worklet_h
#define vtk_m_filter_scalar_topology_worklet_branch_decomposition_hierarchical_volumetric_branch_decomposer_get_outer_end_worklet_h
#include <vtkm/worklet/WorkletMapField.h>
namespace vtkm
{
namespace worklet
{
namespace scalar_topology
{
namespace hierarchical_volumetric_branch_decomposer
{
/// Worklet for getting the outer node ID of a superarc
template <bool isLower>
class GetSuperarcOuterNodeWorklet : public vtkm::worklet::WorkletMapField
{
public:
using ControlSignature = void(
FieldIn superarcId, // (input) superarc ID without flag bits
FieldIn superarcTo, // (input) target node ID of the superarc with flag bits
FieldOut endNodeId // (output) end node ID of the superarc
);
using ExecutionSignature = _3(_1, _2);
using InputDomain = _1;
/// Constructor
/// isLower determines whether to find the upper end or the lower end of the superarc
VTKM_EXEC_CONT
GetSuperarcOuterNodeWorklet() {}
/// The functor checks the direction of the superarc based on the flag information
/// and returns the outer end supernode of the superarc (without flag information)
VTKM_EXEC vtkm::Id operator()(const vtkm::Id& superarcId, const vtkm::Id& superarcTo) const
{
if (contourtree_augmented::NoSuchElement(superarcTo))
{
return contourtree_augmented::NO_SUCH_ELEMENT;
}
bool ascendingSuperarc = contourtree_augmented::IsAscending(superarcTo);
if (isLower ^ ascendingSuperarc)
{
return contourtree_augmented::MaskedIndex(superarcTo);
}
else
{
return superarcId;
}
}
}; // GetSuperarcOuterNodeWorklet
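// The XOR in the functor above encodes the following choice of end node:
//
//   isLower | ascending superarc | returned end
//   --------+--------------------+-------------------------------
//   false   | true               | MaskedIndex(superarcTo)  (top)
//   false   | false              | superarcId               (top)
//   true    | true               | superarcId               (bottom)
//   true    | false              | MaskedIndex(superarcTo)  (bottom)
//
// Hypothetical dispatch sketch (assumes <vtkm/cont/Invoker.h> is included):
//
//   vtkm::cont::Invoker{}(GetSuperarcOuterNodeWorklet<false>{},
//                         superarcIds, superarcTargets, upperEndNodes);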
/// Worklet for identifying the outer end of each branch:
/// outputs 1 if a superarc is the last superarc of its branch, 0 otherwise
class OneIfBranchEndWorklet : public vtkm::worklet::WorkletMapField
{
public:
using ControlSignature = void(
FieldIn superarcId, // (input) actual ID of superarc
WholeArrayIn branchRoots, // (array input) branch root (superarc) IDs of all superarcs
FieldOut branchEndIndicator // (output) 1 if the superarc is the last superarc of its branch, 0 otherwise
);
using ExecutionSignature = _3(_1, _2);
using InputDomain = _1;
/// Constructor, empty
VTKM_EXEC_CONT
OneIfBranchEndWorklet() {}
template <typename InIdPortalType>
VTKM_EXEC vtkm::Id operator()(const vtkm::Id superarcId,
const InIdPortalType& branchRootsPortal) const
{
const vtkm::Id superarcSize = branchRootsPortal.GetNumberOfValues();
// handle illegal situations
if (superarcId < 0 || superarcId >= superarcSize)
return 0;
// if last element
if (superarcSize == superarcId + 1)
return 1;
return branchRootsPortal.Get(superarcId) != branchRootsPortal.Get(superarcId + 1) ? 1 : 0;
}
}; // OneIfBranchEndWorklet
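// Worked example (assuming branchRoots is already sorted so that superarcs of
// the same branch are contiguous, as produced by the BranchEndComparator sort),
// for superarcId = 0..4:
//
//   branchRoots        = [ 3, 3, 3, 7, 7 ]
//   branchEndIndicator = [ 0, 0, 1, 0, 1 ]
//
// An exclusive scan of the indicator can then provide a compact output index
// for each branch end.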
class CopyArcDirectionWorklet : public vtkm::worklet::WorkletMapField
{
public:
using ControlSignature = void(
FieldIn superarcId, // (input) superarc ID
FieldIn superarcTo, // (input) target of superarc, including ascending flag
FieldOut directedSuperarcId // (output) superarc ID with ascending flag
);
using ExecutionSignature = _3(_1, _2);
using InputDomain = _1;
VTKM_EXEC_CONT
CopyArcDirectionWorklet() {}
VTKM_EXEC vtkm::Id operator()(const vtkm::Id superarcId, const vtkm::Id superarcTo) const
{
if (contourtree_augmented::IsAscending(superarcTo))
{
return superarcId | (contourtree_augmented::IS_ASCENDING);
}
else
{
return superarcId;
}
}
}; // CopyArcDirectionWorklet
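// Example with hypothetical values: for superarc 5 whose target 12 carries the
// ascending flag, the worklet returns 5 with IS_ASCENDING set; for a descending
// target it returns plain 5:
//
//   operator()(5, 12 | contourtree_augmented::IS_ASCENDING)
//       == (5 | contourtree_augmented::IS_ASCENDING)
//   operator()(5, 12) == 5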
} // namespace hierarchical_volumetric_branch_decomposer
} // namespace scalar_topology
} // namespace worklet
} // namespace vtkm
#endif

@ -53,6 +53,7 @@
#ifndef vtk_m_worklet_contourtree_augmented_mesh_dem_get_owned_vertices_by_global_id_worklet_h
#define vtk_m_worklet_contourtree_augmented_mesh_dem_get_owned_vertices_by_global_id_worklet_h
#include <vtkm/filter/scalar_topology/worklet/contourtree_augmented/Types.h>
#include <vtkm/filter/scalar_topology/worklet/contourtree_augmented/data_set_mesh/IdRelabeler.h>
#include <vtkm/worklet/WorkletMapField.h>

@ -73,6 +73,8 @@
#include <vtkm/cont/ArrayHandleIndex.h>
#include <vtkm/cont/ArrayHandlePermutation.h>
#include <vtkm/cont/ArrayPortalToIterators.h>
#include <vtkm/cont/ArrayRangeCompute.h>
#include <vtkm/cont/ArrayRangeComputeTemplate.h>
#include <vtkm/cont/ConvertNumComponentsToOffsets.h>
#include <vtkm/cont/EnvironmentTracker.h>
#include <vtkm/cont/Timer.h>
@ -574,7 +576,10 @@ template <typename FieldType>
inline void ContourTreeMesh<FieldType>::ComputeMaxNeighbors()
{
auto neighborCounts = make_ArrayHandleOffsetsToNumComponents(this->NeighborOffsets);
this->MaxNeighbors = vtkm::cont::Algorithm::Reduce(neighborCounts, 0, vtkm::Maximum{});
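// ArrayRangeComputeTemplate returns one vtkm::Range per component; for the
// single-component neighbor counts, entry 0 holds their minimum and maximum,
// so its Max field is the largest number of neighbors of any vertex.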
vtkm::cont::ArrayHandle<vtkm::Range> rangeArray =
vtkm::cont::ArrayRangeComputeTemplate(neighborCounts);
this->MaxNeighbors = static_cast<vtkm::Id>(rangeArray.ReadPortal().Get(0).Max);
}
// Define the behavior for the execution object generate by the PrepareForExecution function