Merge topic 'add/hybrid_hypersweep'

02789a0d4 Improve comments
ed6b0f008 Fix CMakeLists.txt after moving header file.
5fd1b8ce8 Moved I/O functions to testing directory.
b6db5ddaa Make GlobalBlockId int to reduce casts. Renamed BlockIndex to LocalBlockNo.
bec58bd73 Make DIY own blocks and delete them to prevent memory leak on exception
50ca0318b Added missing ExecutionSignature.
f96020a24 Consolidated two copies of NotNoSuchElementPredicate.h
65352d7f0 Remove extraneous Allocate in HierarchicalAugmenter
...

Acked-by: Kitware Robot <kwrobot@kitware.com>
Acked-by: Vicente Bolea <vicente.bolea@kitware.com>
Acked-by: Sujin Philip <sujin.philip@kitware.com>
Merge-request: !2587
Gunther Weber 2021-11-10 18:54:58 +00:00 committed by Kitware Robot
commit d974477e40
75 changed files with 8496 additions and 493 deletions

@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c5c3afc7bdb0fa75bad78fb69ce0557c752b98cab6f000d757ef2706a4498576
size 2116

@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e31bdb91419a32e98dc1061ae6ce348f8fbc6c79b26a4e0bcd352d2b2e62b6a8
size 2228

@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ccbe5767ce622b4c0e5e93dd6f6243d5c76a5a6f07c1e5cf43fb2b5e2e069255
size 2076

@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6e9f0913c17a3a12338d043c455c6e9cde51c72fa70d619d935fa4369effed45
size 2316

@ -84,5 +84,7 @@ if (VTKm_ENABLE_MPI)
configure_file(split_data_2d.py split_data_2d.py COPYONLY)
configure_file(split_data_3d.py split_data_3d.py COPYONLY)
configure_file(hact_test.sh hact_test.sh COPYONLY)
configure_file(hact_test_volume.sh hact_test_volume.sh COPYONLY)
configure_file(testrun.sh testrun.sh COPYONLY)
configure_file(testrun_volume.sh testrun_volume.sh COPYONLY)
endif()

@ -75,6 +75,7 @@
#include <vtkm/worklet/contourtree_augmented/PrintVectors.h>
#include <vtkm/worklet/contourtree_augmented/ProcessContourTree.h>
#include <vtkm/worklet/contourtree_augmented/Types.h>
#include <vtkm/worklet/contourtree_distributed/HierarchicalContourTree.h>
#include <vtkm/worklet/contourtree_distributed/TreeCompiler.h>
// clang-format off
@ -86,15 +87,17 @@ VTKM_THIRDPARTY_POST_INCLUDE
#include <mpi.h>
#include <cstdio>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <sstream>
#include <cstdio>
#include <string>
#include <utility>
#include <vector>
using ValueType = vtkm::Float64;
#define SINGLE_FILE_STDOUT_STDERR
// Simple helper class for parsing the command line options
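For readers skimming this diff, the option handling in the hunks below goes through the small ParseCL helper announced in the comment above. As a rough, hypothetical sketch of that pattern (not the actual ParseCL implementation, which is elided from this diff), a boolean flag such as --augmentHierarchicalTree or --saveOutputData reduces to a string match over the stored arguments:

// Hypothetical minimal stand-in for the command-line helper; the real ParseCL is not shown here.
#include <algorithm>
#include <string>
#include <vector>

class MiniParseCL
{
public:
  void parse(int argc, char** argv)
  {
    // Keep every argument after the program name verbatim.
    this->Options.assign(argv + 1, argv + argc);
  }
  // A flag is present if an argument equals it exactly or starts with "<option>=".
  bool hasOption(const std::string& option) const
  {
    return std::any_of(this->Options.begin(), this->Options.end(), [&option](const std::string& a) {
      return a == option || a.rfind(option + "=", 0) == 0;
    });
  }
  const std::vector<std::string>& getOptions() const { return this->Options; }

private:
  std::vector<std::string> Options;
};

With this pattern, the new --augmentHierarchicalTree and --saveOutputData switches added below are just another hasOption() check plus a bool.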
@ -194,6 +197,12 @@ int main(int argc, char* argv[])
ParseCL parser;
parser.parse(argc, argv);
std::string filename = parser.getOptions().back();
bool augmentHierarchicalTree = false;
if (parser.hasOption("--augmentHierarchicalTree"))
{
augmentHierarchicalTree = true;
}
bool useBoundaryExtremaOnly = true;
if (parser.hasOption("--useFullBoundary"))
{
@ -222,10 +231,10 @@ int main(int argc, char* argv[])
{
saveDotFiles = true;
}
bool saveTreeCompilerData = false;
if (parser.hasOption("--saveTreeCompilerData"))
bool saveOutputData = false;
if (parser.hasOption("--saveOutputData"))
{
saveTreeCompilerData = true;
saveOutputData = true;
}
bool forwardSummary = false;
if (parser.hasOption("--forwardSummary"))
@ -270,19 +279,21 @@ int main(int argc, char* argv[])
<< std::endl;
std::cout << "--mc Use marching cubes connectivity (Default=False)." << std::endl;
std::cout << "--useFullBoundary Use the full boundary during. Typically only useful"
<< std::endl;
std::cout << " to compare the performance between using the full boundary"
<< std::endl
<< " to compare the performance between using the full boundary"
<< std::endl;
std::cout << " and when using only boundary extrema." << std::endl;
std::cout << "--augmentHierarchicalTree Augment the hierarchical tree." << std::endl;
std::cout << "--preSplitFiles Input data is already pre-split into blocks." << std::endl;
std::cout << "--saveDot Save DOT files of the distributed contour tree "
<< "computation (Default=False). " << std::endl;
std::cout << "--saveTreeCompilerData Save data files needed for the tree compiler"
std::cout << "--saveDot Save DOT files of the distributed contour tree " << std::endl
<< " computation (Default=False). " << std::endl;
std::cout << "--saveOutputData Save data files with hierarchical tree or volume data"
<< std::endl;
std::cout << "--numBlocks Number of blocks to use during computation "
<< "(Default=number of MPI ranks.)" << std::endl;
std::cout << "--forwardSummary Forward the summary timings also to the per-rank "
<< "log files. Default is to round-robin print the summary instead" << std::endl;
std::cout << "--forwardSummary Forward the summary timings also to the per-rank " << std::endl
<< " log files. Default is to round-robin print the " << std::endl
<< " summary instead" << std::endl;
std::cout << std::endl;
}
MPI_Finalize();
@ -300,7 +311,7 @@ int main(int argc, char* argv[])
<< " mc=" << useMarchingCubes << std::endl
<< " useFullBoundary=" << !useBoundaryExtremaOnly << std::endl
<< " saveDot=" << saveDotFiles << std::endl
<< " saveTreeCompilerData=" << saveTreeCompilerData << std::endl
<< " saveOutputData=" << saveOutputData << std::endl
<< " forwardSummary=" << forwardSummary << std::endl
<< " nblocks=" << numBlocks << std::endl);
}
@ -532,7 +543,6 @@ int main(int argc, char* argv[])
}
// Read data
using ValueType = vtkm::Float64;
std::vector<ValueType> values(numVertices);
if (filename.compare(filename.length() - 5, 5, ".bdem") == 0)
{
@ -618,7 +628,6 @@ int main(int argc, char* argv[])
vtkm::cont::DataSet inDataSet;
// Currently FloatDefault would be fine, but it could cause problems if we ever
// read binary files here.
using ValueType = vtkm::Float64;
std::vector<ValueType> values;
std::vector<vtkm::Id> dims;
@ -840,6 +849,7 @@ int main(int argc, char* argv[])
localBlockSizes,
useBoundaryExtremaOnly,
useMarchingCubes,
augmentHierarchicalTree,
saveDotFiles,
timingsLogLevel,
treeLogLevel);
@ -858,42 +868,63 @@ int main(int argc, char* argv[])
vtkm::Float64 postFilterSyncTime = currTime - prevTime;
prevTime = currTime;
/*
std::cout << "Result dataset has " << result.GetNumberOfPartitions() << " partitions" << std::endl;
for (vtkm::Id ds_no = 0; ds_no < result.GetNumberOfPartitions(); ++ds_no)
if (saveOutputData)
{
auto ds = result.GetPartition(ds_no);
for (vtkm::Id f_no = 0; f_no < ds.GetNumberOfFields(); ++f_no)
if (augmentHierarchicalTree)
{
auto field = ds.GetField(f_no);
std::cout << field.GetName() << ": ";
PrintArrayContents(field.GetData());
std::cout << std::endl;
for (vtkm::Id ds_no = 0; ds_no < result.GetNumberOfPartitions(); ++ds_no)
{
auto ds = result.GetPartition(ds_no);
vtkm::worklet::contourtree_augmented::IdArrayType supernodes;
ds.GetField("Supernodes").GetData().AsArrayHandle(supernodes);
vtkm::worklet::contourtree_augmented::IdArrayType superarcs;
ds.GetField("Superarcs").GetData().AsArrayHandle(superarcs);
vtkm::worklet::contourtree_augmented::IdArrayType regularNodeGlobalIds;
ds.GetField("RegularNodeGlobalIds").GetData().AsArrayHandle(regularNodeGlobalIds);
vtkm::Id totalVolume = globalSize[0] * globalSize[1] * globalSize[2];
vtkm::worklet::contourtree_augmented::IdArrayType intrinsicVolume;
ds.GetField("IntrinsicVolume").GetData().AsArrayHandle(intrinsicVolume);
vtkm::worklet::contourtree_augmented::IdArrayType dependentVolume;
ds.GetField("DependentVolume").GetData().AsArrayHandle(dependentVolume);
std::string dumpVolumesString =
vtkm::worklet::contourtree_distributed::HierarchicalContourTree<ValueType>::DumpVolumes(
supernodes,
superarcs,
regularNodeGlobalIds,
totalVolume,
intrinsicVolume,
dependentVolume);
std::string volumesFileName = std::string("TreeWithVolumes_Rank_") +
std::to_string(static_cast<int>(rank)) + std::string("_Block_") +
std::to_string(static_cast<int>(ds_no)) + std::string(".txt");
std::ofstream treeStream(volumesFileName.c_str());
treeStream << dumpVolumesString;
}
}
else
{
for (vtkm::Id ds_no = 0; ds_no < result.GetNumberOfPartitions(); ++ds_no)
{
vtkm::worklet::contourtree_distributed::TreeCompiler treeCompiler;
treeCompiler.AddHierarchicalTree(result.GetPartition(ds_no));
char fname[256];
std::snprintf(fname,
sizeof(fname),
"TreeCompilerOutput_Rank%d_Block%d.dat",
rank,
static_cast<int>(ds_no));
FILE* out_file = std::fopen(fname, "wb");
treeCompiler.WriteBinary(out_file);
std::fclose(out_file);
}
}
}
*/
if (saveTreeCompilerData)
{
for (vtkm::Id ds_no = 0; ds_no < result.GetNumberOfPartitions(); ++ds_no)
{
vtkm::worklet::contourtree_distributed::TreeCompiler treeCompiler;
treeCompiler.AddHierarchicalTree(result.GetPartition(ds_no));
char fname[256];
std::snprintf(fname,
sizeof(fname),
"TreeCompilerOutput_Rank%d_Block%d.dat",
rank,
static_cast<int>(ds_no));
FILE* out_file = std::fopen(fname, "wb");
treeCompiler.WriteBinary(out_file);
std::fclose(out_file);
}
}
currTime = totalTime.GetElapsedTime();
vtkm::Float64 saveTreeCompilerDataTime = currTime - prevTime;
vtkm::Float64 saveOutputDataTime = currTime - prevTime;
prevTime = currTime;
std::cout << std::flush;
@ -942,7 +973,7 @@ int main(int argc, char* argv[])
<< std::setw(42) << std::left << " Post filter Sync"
<< ": " << postFilterSyncTime << " seconds" << std::endl
<< std::setw(42) << std::left << " Save Tree Compiler Data"
<< ": " << saveTreeCompilerDataTime << " seconds" << std::endl
<< ": " << saveOutputDataTime << " seconds" << std::endl
<< std::setw(42) << std::left << " Total Time"
<< ": " << currTime << " seconds");

@ -24,12 +24,12 @@ rm ${filename}
echo "Running HACT"
n_parts=$(($2*$2))
echo mpirun -np 4 ./ContourTree_Distributed -d Any --numBlocks=${n_parts} ${fileroot}_part_%d_of_${n_parts}.txt
mpirun -np 4 ./ContourTree_Distributed --vtkm-device Any --preSplitFiles --saveTreeCompilerData --numBlocks=${n_parts} ${fileroot}_part_%d_of_${n_parts}.txt
rm ${fileroot}_part_*_of_${n_parts}.txt
echo "Compiling Outputs"
./TreeCompiler TreeCompilerOutput_*.dat | sort > outsort${fileroot}_$2x$2.txt
rm TreeCompilerOutput_*.dat
echo "Diffing"
diff outsort${fileroot}_$2x$2.txt ${GTCT_DIR}/outsort${fileroot}.txt

@ -0,0 +1,39 @@
#!/bin/sh
GTCT_DIR=${GTCT_DIR:-${HOME}/devel/parallel-peak-pruning/ContourTree/SweepAndMergeSerial/out}
RED=""
GREEN=""
NC=""
if [ -t 1 ]; then
# If stdout is a terminal, color Pass and FAIL green and red, respectively
RED=$(tput setaf 1)
GREEN=$(tput setaf 2)
NC=$(tput sgr0)
fi
echo "Removing previously generated files"
rm *.log *.dat
echo "Copying target file "$1 "into current directory"
filename=${1##*/}
fileroot=${filename%.txt}
cp $1 ${filename}
echo "Splitting data into "$2" x "$2" parts"
./split_data_2d.py ${filename} $2
rm ${filename}
echo "Running HACT"
n_parts=$(($2*$2))
mpirun -np 4 ./ContourTree_Distributed --vtkm-device Any --preSplitFiles --saveOutputData --augmentHierarchicalTree --numBlocks=${n_parts} ${fileroot}_part_%d_of_${n_parts}.txt
rm ${fileroot}_part_*_of_${n_parts}.txt
echo "Compiling Outputs"
sort -u TreeWithVolumes_Rank_*.txt > outsort${fileroot}_$2x$2.txt
rm TreeWithVolumes_Rank_*.txt
echo "Diffing"
diff outsort${fileroot}_$2x$2.txt ${GTCT_DIR}/outsort${fileroot}.txt
if test $? -eq 0; then echo "${GREEN}Pass${NC}"; rm outsort${fileroot}_$2x$2.txt; else echo "${RED}FAIL${NC}"; fi;
# echo "Generating Dot files"
# ./makedot.sh

@ -10,7 +10,7 @@ import operator
# Read a 3D text file from disk into a NumPy array
# ... Plain text
def read_file(fn):
data = np.fromfile(fn, dtype=np.float, sep=" ")
data = np.fromfile(fn, dtype=float, sep=" ")
data = data[3:].reshape((int(data[2]),int(data[0]),int(data[1])))
return data

@ -0,0 +1,99 @@
#!/bin/sh
mkdir -p out
DATA_DIR=${DATA_DIR:-${HOME}/devel/parallel-peak-pruning/Data/2D}
if [ ! -d $DATA_DIR ]; then
echo "Error: Directory $DATA_DIR does not exist!"
exit 1;
fi;
echo
echo "Starting Timing Runs"
echo
echo "8x9 Test Set"
./hact_test_volume.sh $DATA_DIR/8x9test.txt 2
./hact_test_volume.sh $DATA_DIR/8x9test.txt 4
# ./hact_test_volume.sh $DATA_DIR/8x9test.txt 8
echo
echo "Vancouver Test Set"
./hact_test_volume.sh $DATA_DIR/vanc.txt 2
./hact_test_volume.sh $DATA_DIR/vanc.txt 4
# ./hact_test_volume.sh $DATA_DIR/vanc.txt 8
# ./hact_test_volume.sh $DATA_DIR/vanc.txt 16
echo
echo "Vancouver SWSW Test Set"
./hact_test_volume.sh $DATA_DIR/vancouverSWSW.txt 2
./hact_test_volume.sh $DATA_DIR/vancouverSWSW.txt 4
./hact_test_volume.sh $DATA_DIR/vancouverSWSW.txt 8
# ./hact_test_volume.sh $DATA_DIR/vancouverSWSW.txt 16
echo
echo "Vancouver SWNW Test Set"
./hact_test_volume.sh $DATA_DIR/vancouverSWNW.txt 2
./hact_test_volume.sh $DATA_DIR/vancouverSWNW.txt 4
./hact_test_volume.sh $DATA_DIR/vancouverSWNW.txt 8
# ./hact_test_volume.sh $DATA_DIR/vancouverSWNW.txt 16
echo
echo "Vancouver SWSE Test Set"
./hact_test_volume.sh $DATA_DIR/vancouverSWSE.txt 2
./hact_test_volume.sh $DATA_DIR/vancouverSWSE.txt 4
./hact_test_volume.sh $DATA_DIR/vancouverSWSE.txt 8
# ./hact_test_volume.sh $DATA_DIR/vancouverSWSE.txt 16
echo
echo "Vancouver SWNE Test Set"
./hact_test_volume.sh $DATA_DIR/vancouverSWNE.txt 2
./hact_test_volume.sh $DATA_DIR/vancouverSWNE.txt 4
./hact_test_volume.sh $DATA_DIR/vancouverSWNE.txt 8
# ./hact_test_volume.sh $DATA_DIR/vancouverSWNE.txt 16
echo
echo "Vancouver NE Test Set"
./hact_test_volume.sh $DATA_DIR/vancouverNE.txt 2
./hact_test_volume.sh $DATA_DIR/vancouverNE.txt 4
./hact_test_volume.sh $DATA_DIR/vancouverNE.txt 8
# ./hact_test_volume.sh $DATA_DIR/vancouverNE.txt 16
echo
echo "Vancouver NW Test Set"
./hact_test_volume.sh $DATA_DIR/vancouverNW.txt 2
./hact_test_volume.sh $DATA_DIR/vancouverNW.txt 4
./hact_test_volume.sh $DATA_DIR/vancouverNW.txt 8
# ./hact_test_volume.sh $DATA_DIR/vancouverNW.txt 16
echo
echo "Vancouver SE Test Set"
./hact_test_volume.sh $DATA_DIR/vancouverSE.txt 2
./hact_test_volume.sh $DATA_DIR/vancouverSE.txt 4
./hact_test_volume.sh $DATA_DIR/vancouverSE.txt 8
# ./hact_test_volume.sh $DATA_DIR/vancouverSE.txt 16
echo
echo "Vancouver SW Test Set"
./hact_test_volume.sh $DATA_DIR/vancouverSW.txt 2
./hact_test_volume.sh $DATA_DIR/vancouverSW.txt 4
./hact_test_volume.sh $DATA_DIR/vancouverSW.txt 8
# ./hact_test_volume.sh $DATA_DIR/vancouverSW.txt 16
echo
echo "Icefields Test Set"
./hact_test_volume.sh $DATA_DIR/icefield.txt 2
./hact_test_volume.sh $DATA_DIR/icefield.txt 4
./hact_test_volume.sh $DATA_DIR/icefield.txt 8
# ./hact_test_volume.sh $DATA_DIR/icefield.txt 16
# ./hact_test_volume.sh $DATA_DIR/icefield.txt 32
# ./hact_test_volume.sh $DATA_DIR/icefield.txt 64
echo
# echo "GTOPO30 Full Tiny Test Set"
# ./hact_test_volume.sh $DATA_DIR/gtopo_full_tiny.txt 2
# ./hact_test_volume.sh $DATA_DIR/gtopo_full_tiny.txt 4
# ./hact_test_volume.sh $DATA_DIR/gtopo_full_tiny.txt 8
# ./hact_test_volume.sh $DATA_DIR/gtopo_full_tiny.txt 16
# ./hact_test_volume.sh $DATA_DIR/gtopo_full_tiny.txt 32
# ./hact_test_volume.sh $DATA_DIR/gtopo_full_tiny.txt 64
# echo
echo "GTOPO30 UK Tile Test Set"
./hact_test_volume.sh $DATA_DIR/gtopo30w020n40.txt 2
./hact_test_volume.sh $DATA_DIR/gtopo30w020n40.txt 4
./hact_test_volume.sh $DATA_DIR/gtopo30w020n40.txt 8
# ./hact_test_volume.sh $DATA_DIR/gtopo30w020n40.txt 16
# ./hact_test_volume.sh $DATA_DIR/gtopo30w020n40.txt 32
# ./hact_test_volume.sh $DATA_DIR/gtopo30w020n40.txt 64
# ./hact_test_volume.sh $DATA_DIR/gtopo30w020n40.txt 128
# ./hact_test_volume.sh $DATA_DIR/gtopo30w020n40.txt 256
# ./hact_test_volume.sh $DATA_DIR/gtopo30w020n40.txt 512
echo "Done"

@ -118,6 +118,7 @@ public:
const vtkm::cont::ArrayHandle<vtkm::Id3>& localBlockSizes,
bool useBoundaryExtremaOnly = true,
bool useMarchingCubes = false,
bool augmentHierarchicalTree = false,
bool saveDotFiles = false,
vtkm::cont::LogLevel timingsLogLevel = vtkm::cont::LogLevel::Perf,
vtkm::cont::LogLevel treeLogLevel = vtkm::cont::LogLevel::Info);
@ -183,6 +184,9 @@ private:
/// Use marching cubes connectivity for computing the contour tree
bool UseMarchingCubes;
/// Augment hierarchical tree
bool AugmentHierarchicalTree;
/// Save dot files for all tree computations
bool SaveDotFiles;
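To illustrate the new augmentHierarchicalTree parameter in context, the following is a hedged usage sketch. Only the tail of the constructor signature is visible in this hunk; the leading decomposition parameters (blocksPerDim, globalSize, localBlockIndices, localBlockOrigins), the include path, and the field name are assumptions for illustration, not taken from this diff.

// Hedged usage sketch; the leading constructor parameters and the include path are assumed.
#include <string>
#include <vtkm/cont/ArrayHandle.h>
#include <vtkm/cont/PartitionedDataSet.h>
#include <vtkm/filter/ContourTreeUniformDistributed.h>

vtkm::cont::PartitionedDataSet RunWithAugmentation(
  vtkm::Id3 blocksPerDim, // assumed leading parameters
  vtkm::Id3 globalSize,
  const vtkm::cont::ArrayHandle<vtkm::Id3>& localBlockIndices,
  const vtkm::cont::ArrayHandle<vtkm::Id3>& localBlockOrigins,
  const vtkm::cont::ArrayHandle<vtkm::Id3>& localBlockSizes,
  const vtkm::cont::PartitionedDataSet& input,
  const std::string& fieldName)
{
  vtkm::filter::ContourTreeUniformDistributed filter(blocksPerDim,
                                                     globalSize,
                                                     localBlockIndices,
                                                     localBlockOrigins,
                                                     localBlockSizes,
                                                     true,  // useBoundaryExtremaOnly
                                                     false, // useMarchingCubes
                                                     true,  // augmentHierarchicalTree (new in this change)
                                                     false, // saveDotFiles
                                                     vtkm::cont::LogLevel::Perf,
                                                     vtkm::cont::LogLevel::Info);
  filter.SetActiveField(fieldName);
  // With augmentation enabled, the result also carries the IntrinsicVolume and
  // DependentVolume fields added later in this merge request.
  return filter.Execute(input);
}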

@ -66,9 +66,13 @@
// distributed contour tree includes
#include <vtkm/worklet/contourtree_distributed/BoundaryTree.h>
#include <vtkm/worklet/contourtree_distributed/BoundaryTreeMaker.h>
#include <vtkm/worklet/contourtree_distributed/CombineHyperSweepBlockFunctor.h>
#include <vtkm/worklet/contourtree_distributed/ComputeDistributedContourTreeFunctor.h>
#include <vtkm/worklet/contourtree_distributed/DistributedContourTreeBlockData.h>
#include <vtkm/worklet/contourtree_distributed/HierarchicalContourTree.h>
#include <vtkm/worklet/contourtree_distributed/HierarchicalAugmenter.h>
#include <vtkm/worklet/contourtree_distributed/HierarchicalAugmenterFunctor.h>
#include <vtkm/worklet/contourtree_distributed/HierarchicalHyperSweeper.h>
#include <vtkm/worklet/contourtree_distributed/HyperSweepBlock.h>
#include <vtkm/worklet/contourtree_distributed/InteriorForest.h>
#include <vtkm/worklet/contourtree_distributed/PrintGraph.h>
#include <vtkm/worklet/contourtree_distributed/SpatialDecomposition.h>
@ -144,7 +148,7 @@ void SaveAfterFanInResults(
sizeof(buffer),
"AfterFanInResults_Rank%d_Block%d.txt",
static_cast<int>(rank),
static_cast<int>(blockData->BlockIndex));
static_cast<int>(blockData->LocalBlockNo));
std::ofstream os(buffer);
os << "Contour Trees" << std::endl;
os << "=============" << std::endl;
@ -165,17 +169,17 @@ void SaveAfterFanInResults(
template <typename FieldType>
void SaveHierarchicalTreeDot(
vtkm::worklet::contourtree_distributed::DistributedContourTreeBlockData<FieldType>* blockData,
vtkm::worklet::contourtree_distributed::HierarchicalContourTree<FieldType>& hierarchicalTree,
const vtkm::worklet::contourtree_distributed::DistributedContourTreeBlockData<FieldType>*
blockData,
vtkm::Id rank,
vtkm::Id nRounds)
{
std::string hierarchicalTreeFileName = std::string("Rank_") +
std::to_string(static_cast<int>(rank)) + std::string("_Block_") +
std::to_string(static_cast<int>(blockData->BlockIndex)) + std::string("_Round_") +
std::to_string(static_cast<int>(blockData->LocalBlockNo)) + std::string("_Round_") +
std::to_string(nRounds) + std::string("_Hierarchical_Tree.gv");
std::string hierarchicalTreeLabel = std::string("Block ") +
std::to_string(static_cast<int>(blockData->BlockIndex)) + std::string(" Round ") +
std::to_string(static_cast<int>(blockData->LocalBlockNo)) + std::string(" Round ") +
std::to_string(nRounds) + std::string(" Hierarchical Tree");
vtkm::Id hierarchicalTreeDotSettings =
vtkm::worklet::contourtree_distributed::SHOW_SUPER_STRUCTURE |
@ -186,7 +190,7 @@ void SaveHierarchicalTreeDot(
std::ofstream hierarchicalTreeFile(hierarchicalTreeFileName);
hierarchicalTreeFile
<< vtkm::worklet::contourtree_distributed::HierarchicalContourTreeDotGraphPrint<FieldType>(
hierarchicalTreeLabel, hierarchicalTree, hierarchicalTreeDotSettings);
hierarchicalTreeLabel, blockData->HierarchicalTree, hierarchicalTreeDotSettings);
}
} // end namespace contourtree_distributed_detail
@ -203,12 +207,14 @@ ContourTreeUniformDistributed::ContourTreeUniformDistributed(
const vtkm::cont::ArrayHandle<vtkm::Id3>& localBlockSizes,
bool useBoundaryExtremaOnly,
bool useMarchingCubes,
bool augmentHierarchicalTree,
bool saveDotFiles,
vtkm::cont::LogLevel timingsLogLevel,
vtkm::cont::LogLevel treeLogLevel)
: vtkm::filter::FilterField<ContourTreeUniformDistributed>()
, UseBoundaryExtremaOnly(useBoundaryExtremaOnly)
, UseMarchingCubes(useMarchingCubes)
, AugmentHierarchicalTree(augmentHierarchicalTree)
, SaveDotFiles(saveDotFiles)
, TimingsLogLevel(timingsLogLevel)
, TreeLogLevel(treeLogLevel)
@ -592,7 +598,9 @@ inline VTKM_CONT void ContourTreeUniformDistributed::PostExecute(
vtkm::cont::Timer timer;
timer.Start();
// We are running in parallel and need to merge the contour tree in PostExecute
// TODO/FIXME: Make sure this still makes sense
// TODO/FIXME: This filter should only be used in a parallel setting with more
// than one block. Is there a better way to enforce this? Throw an exception
// instead of an empty return? What is the appropriate exception?
if (this->MultiBlockSpatialDecomposition.GetGlobalNumberOfBlocks() == 1)
{
return;
@ -634,26 +642,71 @@ VTKM_CONT void ContourTreeUniformDistributed::DoPostExecute(
vtkm::Id size = comm.size();
vtkm::Id rank = comm.rank();
// 1. Fan in to compute the hierarchical contour tree
// 1.1 Setup the block data for DIY
std::vector<vtkm::worklet::contourtree_distributed::DistributedContourTreeBlockData<FieldType>*>
localDataBlocks(static_cast<size_t>(input.GetNumberOfPartitions()), nullptr);
for (std::size_t bi = 0; bi < static_cast<std::size_t>(input.GetNumberOfPartitions()); bi++)
// ******** 1. Fan in to compute the hierarchical contour tree ********
// 1.1 Setup DIY to do global binary reduction of neighbouring blocks.
// See also the ReductionOperation struct for an example
// 1.1.1 Create the vtkmdiy master ...
using DistributedContourTreeBlockData =
vtkm::worklet::contourtree_distributed::DistributedContourTreeBlockData<FieldType>;
vtkmdiy::Master master(comm,
1, // Use 1 thread, VTK-M will do the threading
-1, // All blocks in memory
0, // No create function (since all blocks in memory)
DistributedContourTreeBlockData::destroy);
// ... and record time for creating the DIY master
timingsStream << " " << std::setw(38) << std::left << "Create DIY Master"
<< ": " << timer.GetElapsedTime() << " seconds" << std::endl;
timer.Start();
// 1.1.2 Compute the gids for our local blocks
using RegularDecomposer = vtkmdiy::RegularDecomposer<vtkmdiy::DiscreteBounds>;
const auto& spatialDecomp = this->MultiBlockSpatialDecomposition;
const auto numDims = spatialDecomp.NumberOfDimensions();
// ... compute division vector for global domain
RegularDecomposer::DivisionsVector diyDivisions(numDims);
for (vtkm::IdComponent d = 0; d < static_cast<vtkm::IdComponent>(numDims); ++d)
{
diyDivisions[d] = static_cast<int>(spatialDecomp.BlocksPerDimension[d]);
}
// ... compute coordinates of local blocks
auto localBlockIndicesPortal = spatialDecomp.LocalBlockIndices.ReadPortal();
std::vector<int> vtkmdiyLocalBlockGids(static_cast<size_t>(input.GetNumberOfPartitions()));
for (vtkm::Id bi = 0; bi < input.GetNumberOfPartitions(); bi++)
{
RegularDecomposer::DivisionsVector diyCoords(static_cast<size_t>(numDims));
auto currentCoords = localBlockIndicesPortal.Get(bi);
for (vtkm::IdComponent d = 0; d < numDims; ++d)
{
diyCoords[d] = static_cast<int>(currentCoords[d]);
}
vtkmdiyLocalBlockGids[static_cast<size_t>(bi)] =
RegularDecomposer::coords_to_gid(diyCoords, diyDivisions);
}
// Record time to compute the local block ids
timingsStream << " " << std::setw(38) << std::left << "Compute Block Ids and Local Links"
<< ": " << timer.GetElapsedTime() << " seconds" << std::endl;
timer.Start();
// 1.1.3 Setup the block data for DIY and add it to master
for (vtkm::Id bi = 0; bi < input.GetNumberOfPartitions(); bi++)
{
// Create the local data block structure and set extents
localDataBlocks[bi] =
new vtkm::worklet::contourtree_distributed::DistributedContourTreeBlockData<FieldType>();
localDataBlocks[bi]->BlockIndex = static_cast<vtkm::Id>(bi);
localDataBlocks[bi]->BlockOrigin =
this->MultiBlockSpatialDecomposition.LocalBlockOrigins.ReadPortal().Get(
static_cast<vtkm::Id>(bi));
localDataBlocks[bi]->BlockSize =
this->MultiBlockSpatialDecomposition.LocalBlockSizes.ReadPortal().Get(
static_cast<vtkm::Id>(bi));
auto newBlock = new DistributedContourTreeBlockData();
// Save local tree information for fan out FIXME: Try to avoid copy
localDataBlocks[bi]->ContourTrees.push_back(this->LocalContourTrees[bi]);
localDataBlocks[bi]->InteriorForests.push_back(this->LocalInteriorForests[bi]);
// Copy global block id into the local data block for use in the hierarchical augmentation
newBlock->GlobalBlockId = vtkmdiyLocalBlockGids[bi];
newBlock->LocalBlockNo = bi;
newBlock->BlockOrigin = spatialDecomp.LocalBlockOrigins.ReadPortal().Get(bi);
newBlock->BlockSize = spatialDecomp.LocalBlockSizes.ReadPortal().Get(bi);
// Save local tree information for fan out; TODO/FIXME: Try to avoid copy
newBlock->ContourTrees.push_back(this->LocalContourTrees[bi]);
newBlock->InteriorForests.push_back(this->LocalInteriorForests[bi]);
// ... Compute arrays needed for constructing contour tree mesh
const auto sortOrder = this->LocalMeshes[bi].SortOrder;
@ -683,15 +736,23 @@ VTKM_CONT void ContourTreeUniformDistributed::DoPostExecute(
vtkm::cont::ArrayCopy(currField.GetData(), fieldData);
// ... compute and store the actual mesh
localDataBlocks[bi]->ContourTreeMeshes.emplace_back(this->LocalBoundaryTrees[bi].VertexIndex,
this->LocalBoundaryTrees[bi].Superarcs,
sortOrder,
fieldData,
localGlobalMeshIndex);
newBlock->ContourTreeMeshes.emplace_back(this->LocalBoundaryTrees[bi].VertexIndex,
this->LocalBoundaryTrees[bi].Superarcs,
sortOrder,
fieldData,
localGlobalMeshIndex);
// NOTE: Use dummy link to make DIY happy. The dummy link is never used, since all
// communication is via RegularDecomposer, which sets up its own links. No need
// to keep the pointer, as DIY will "own" it and delete it when no longer needed.
// NOTE: Since we passed a "destroy" function to DIY master, it will own the local data
// blocks and delete them when done.
master.add(vtkmdiyLocalBlockGids[bi], newBlock, new vtkmdiy::Link);
} // for
// Record time for setting block data
timingsStream << " " << std::setw(38) << std::left << "Compute Block Data for Fan In"
// Record time for computing block data and adding it to master
timingsStream << " " << std::setw(38) << std::left
<< "Computing Block Data for Fan In and Adding Data Blocks to DIY"
<< ": " << timer.GetElapsedTime() << " seconds" << std::endl;
timer.Start();
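The NOTE comments above describe the DIY ownership pattern this change switches to: the master is constructed with a destroy callback so it owns and frees the blocks (addressing the leak-on-exception issue mentioned in the commit list), and each block is added with a throwaway link that fix_links() later replaces. A condensed, hypothetical sketch of just that pattern (MyBlock and the setup are stand-ins, not the real DistributedContourTreeBlockData):

// Condensed sketch of the DIY block-ownership pattern; MyBlock is a hypothetical stand-in.
#include <cstddef>
#include <vector>
#include <vtkm/thirdparty/diy/diy.h> // assumed VTK-m diy wrapper header

struct MyBlock
{
  int LocalBlockNo = 0;
  // DIY invokes this through the destroy callback when the master releases the block.
  static void Destroy(void* b) { delete static_cast<MyBlock*>(b); }
};

template <typename Communicator, typename AssignerType>
void AddOwnedBlocks(Communicator& comm, const AssignerType& assigner, const std::vector<int>& gids)
{
  vtkmdiy::Master master(comm,
                         1,  // one thread; VTK-m does its own threading
                         -1, // keep all blocks in memory
                         0,  // no create function; blocks are added explicitly below
                         MyBlock::Destroy);
  for (std::size_t i = 0; i < gids.size(); ++i)
  {
    auto* block = new MyBlock;
    block->LocalBlockNo = static_cast<int>(i);
    // The dummy link is never used: fix_links() rebuilds the real links from the
    // assigner, and the master now owns (and will delete) both block and link.
    master.add(gids[i], block, new vtkmdiy::Link);
  }
  vtkmdiy::fix_links(master, assigner);
}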
@ -700,99 +761,36 @@ VTKM_CONT void ContourTreeUniformDistributed::DoPostExecute(
// should not be significantly more expensive than doing it all in one loop
if (this->SaveDotFiles)
{
for (std::size_t bi = 0; bi < static_cast<std::size_t>(input.GetNumberOfPartitions()); bi++)
{
master.foreach ([&](DistributedContourTreeBlockData* b, const vtkmdiy::Master::ProxyWithLink&) {
// save the contour tree mesh
std::string contourTreeMeshFileName = std::string("Rank_") +
std::to_string(static_cast<int>(rank)) + std::string("_Block_") +
std::to_string(static_cast<int>(bi)) + std::string("_Initial_Step_3_BRACT_Mesh.txt");
localDataBlocks[bi]->ContourTreeMeshes.back().Save(contourTreeMeshFileName.c_str());
std::to_string(static_cast<int>(b->LocalBlockNo)) +
std::string("_Initial_Step_3_BRACT_Mesh.txt");
b->ContourTreeMeshes.back().Save(contourTreeMeshFileName.c_str());
// save the corresponding .gv file
std::string boundaryTreeMeshFileName = std::string("Rank_") +
std::to_string(static_cast<int>(rank)) + std::string("_Block_") +
std::to_string(static_cast<int>(bi)) + std::string("_Initial_Step_5_BRACT_Mesh.gv");
std::to_string(static_cast<int>(b->LocalBlockNo)) +
std::string("_Initial_Step_5_BRACT_Mesh.gv");
std::ofstream boundaryTreeMeshFile(boundaryTreeMeshFileName);
boundaryTreeMeshFile
<< vtkm::worklet::contourtree_distributed::ContourTreeMeshDotGraphPrint<FieldType>(
std::string("Block ") + std::to_string(static_cast<int>(rank)) +
std::string(" Initial Step 5 BRACT Mesh"),
localDataBlocks[bi]->ContourTreeMeshes.back(),
b->ContourTreeMeshes.back(),
worklet::contourtree_distributed::SHOW_CONTOUR_TREE_MESH_ALL);
} // for
} // // if(SaveDotFiles)
}); // master.for_each
} // // if(SaveDotFiles)
// Record time for saving debug data
timingsStream << " " << std::setw(38) << std::left << "Save block data for debug"
<< ": " << timer.GetElapsedTime() << " seconds" << std::endl;
timer.Start();
// 1.2 Setup vtkmdiy to do global binary reduction of neighbouring blocks.
// See also the ReductionOperation struct for an example
// Create the vtkmdiy master
vtkmdiy::Master master(comm,
1, // Use 1 thread, VTK-M will do the threading
-1 // All blocks in memory
);
// Record time for creating the DIY master
timingsStream << " " << std::setw(38) << std::left << "Create DIY Master"
<< ": " << timer.GetElapsedTime() << " seconds" << std::endl;
timer.Start();
// 1.2.1 Compute the gids for our local blocks
using RegularDecomposer = vtkmdiy::RegularDecomposer<vtkmdiy::DiscreteBounds>;
const vtkm::worklet::contourtree_distributed::SpatialDecomposition& spatialDecomp =
this->MultiBlockSpatialDecomposition;
const auto numDims = spatialDecomp.NumberOfDimensions();
// ... division vector
RegularDecomposer::DivisionsVector diyDivisions(numDims);
for (vtkm::IdComponent d = 0;
d < static_cast<vtkm::IdComponent>(spatialDecomp.NumberOfDimensions());
++d)
{
diyDivisions[d] = static_cast<int>(spatialDecomp.BlocksPerDimension[d]);
}
// ... coordinates of local blocks
auto localBlockIndicesPortal = spatialDecomp.LocalBlockIndices.ReadPortal();
std::vector<vtkm::Id> vtkmdiyLocalBlockGids(static_cast<size_t>(input.GetNumberOfPartitions()));
for (vtkm::Id bi = 0; bi < input.GetNumberOfPartitions(); bi++)
{
RegularDecomposer::DivisionsVector diyCoords(static_cast<size_t>(numDims));
auto currentCoords = localBlockIndicesPortal.Get(bi);
for (vtkm::IdComponent d = 0; d < numDims; ++d)
{
diyCoords[d] = static_cast<int>(currentCoords[d]);
}
vtkmdiyLocalBlockGids[static_cast<size_t>(bi)] =
RegularDecomposer::coords_to_gid(diyCoords, diyDivisions);
}
// Record time to compute the local block ids
timingsStream << " " << std::setw(38) << std::left << "Compute Block Ids and Local Links"
<< ": " << timer.GetElapsedTime() << " seconds" << std::endl;
timer.Start();
// 1.2.2 Add my local blocks to the vtkmdiy master.
for (std::size_t bi = 0; bi < static_cast<std::size_t>(input.GetNumberOfPartitions()); bi++)
{
master.add(static_cast<int>(vtkmdiyLocalBlockGids[bi]), // block id
localDataBlocks[bi],
new vtkmdiy::Link); // Use dummy link to make DIY happy.
// NOTE: The dummy link is never used, since all communication is via RegularDecomposer,
// which sets up its own links
// NOTE: No need to keep the pointer, as DIY will "own" it and delete it when no longer
// needed TODO/FIXME: Confirm that last statement
}
// Record time for adding data blocks to the master
timingsStream << " " << std::setw(38) << std::left << "Add Data Blocks to DIY"
<< ": " << timer.GetElapsedTime() << " seconds" << std::endl;
timer.Start();
// 1.2.3 Define the decomposition of the domain into regular blocks
// 1.2 Set up DIY for binary reduction
// 1.2.1 Define the decomposition of the domain into regular blocks
RegularDecomposer::BoolVector shareFace(3, true);
RegularDecomposer::BoolVector wrap(3, false);
RegularDecomposer::CoordinateVector ghosts(3, 1);
@ -809,8 +807,7 @@ VTKM_CONT void ContourTreeUniformDistributed::DoPostExecute(
comm, static_cast<int>(size), static_cast<int>(spatialDecomp.GetGlobalNumberOfBlocks()));
for (vtkm::Id bi = 0; bi < input.GetNumberOfPartitions(); bi++)
{
assigner.set_rank(static_cast<int>(rank),
static_cast<int>(vtkmdiyLocalBlockGids[static_cast<size_t>(bi)]));
assigner.set_rank(static_cast<int>(rank), vtkmdiyLocalBlockGids[static_cast<size_t>(bi)]);
}
// Record time for creating the decomposer and assigner
@ -818,7 +815,7 @@ VTKM_CONT void ContourTreeUniformDistributed::DoPostExecute(
<< ": " << timer.GetElapsedTime() << " seconds" << std::endl;
timer.Start();
// 1.2.4 Fix the vtkmdiy links.
// 1.2.2 Fix the vtkmdiy links.
vtkmdiy::fix_links(master, assigner);
// Record time to fix the links
@ -837,15 +834,13 @@ VTKM_CONT void ContourTreeUniformDistributed::DoPostExecute(
timingsStream << " " << std::setw(38) << std::left << "Create DIY Swap Partners"
<< ": " << timer.GetElapsedTime() << " seconds" << std::endl;
timer.Start();
// 1.3 execute the fan in reduction
// 1.3 Perform fan-in reduction
const vtkm::worklet::contourtree_distributed::ComputeDistributedContourTreeFunctor<FieldType>
computeDistributedContourTreeFunctor(this->MultiBlockSpatialDecomposition.GlobalSize,
this->UseBoundaryExtremaOnly,
this->TimingsLogLevel,
this->TreeLogLevel);
vtkmdiy::reduce(master, assigner, partners, computeDistributedContourTreeFunctor);
// Record timing for the actual reduction
timingsStream << " " << std::setw(38) << std::left << "Fan In Reduction"
<< ": " << timer.GetElapsedTime() << " seconds" << std::endl;
@ -858,154 +853,352 @@ VTKM_CONT void ContourTreeUniformDistributed::DoPostExecute(
<< ": " << timer.GetElapsedTime() << " seconds" << std::endl;
timer.Start();
// 2. Fan out to update all the tree
// 2.1 DataSets for creating output data
std::vector<vtkm::cont::DataSet> hierarchicalTreeOutputDataSet(localDataBlocks.size());
// 2.2. Use foreach to compute the fan-out
master.foreach (
[&](
vtkm::worklet::contourtree_distributed::DistributedContourTreeBlockData<FieldType>* blockData,
const vtkmdiy::Master::ProxyWithLink&) {
// ******** 2. Fan out to update all the tree ********
master.foreach ([&](DistributedContourTreeBlockData* blockData,
const vtkmdiy::Master::ProxyWithLink&) {
#ifdef DEBUG_PRINT_CTUD
// Save the contour tree, contour tree meshes, and interior forest data for debugging
vtkm::filter::contourtree_distributed_detail::SaveAfterFanInResults(
blockData, rank, this->TreeLogLevel);
// Save the contour tree, contour tree meshes, and interior forest data for debugging
vtkm::filter::contourtree_distributed_detail::SaveAfterFanInResults(
blockData, rank, this->TreeLogLevel);
#endif
vtkm::cont::Timer iterationTimer;
vtkm::cont::Timer iterationTimer;
iterationTimer.Start();
std::stringstream fanoutTimingsStream;
// Fan out
auto nRounds = blockData->ContourTrees.size() - 1;
blockData->HierarchicalTree.Initialize(static_cast<vtkm::Id>(nRounds),
blockData->ContourTrees[nRounds],
blockData->ContourTreeMeshes[nRounds - 1]);
// save the corresponding .gv file
if (this->SaveDotFiles)
{
vtkm::filter::contourtree_distributed_detail::SaveHierarchicalTreeDot(
blockData, rank, nRounds);
} // if(this->SaveDotFiles)
fanoutTimingsStream << " Fan Out Init Hierarchical Tree (block=" << blockData->LocalBlockNo
<< ") : " << iterationTimer.GetElapsedTime() << " seconds" << std::endl;
iterationTimer.Start();
for (auto round = nRounds - 1; round > 0; round--)
{
iterationTimer.Start();
std::stringstream fanoutTimingsStream;
// Fan out
auto nRounds = blockData->ContourTrees.size() - 1;
vtkm::worklet::contourtree_distributed::HierarchicalContourTree<FieldType> hierarchicalTree;
hierarchicalTree.Initialize(static_cast<vtkm::Id>(nRounds),
blockData->ContourTrees[nRounds],
blockData->ContourTreeMeshes[nRounds - 1]);
vtkm::worklet::contourtree_distributed::
TreeGrafter<vtkm::worklet::contourtree_augmented::ContourTreeMesh<FieldType>, FieldType>
grafter(&(blockData->ContourTreeMeshes[round - 1]),
blockData->ContourTrees[round],
&(blockData->InteriorForests[round]));
grafter.GraftInteriorForests(static_cast<vtkm::Id>(round),
blockData->HierarchicalTree,
blockData->ContourTreeMeshes[round - 1].SortedValues);
// save the corresponding .gv file
if (this->SaveDotFiles)
{
vtkm::filter::contourtree_distributed_detail::SaveHierarchicalTreeDot(
blockData, hierarchicalTree, rank, nRounds);
blockData, rank, nRounds);
} // if(this->SaveDotFiles)
fanoutTimingsStream << " Fan Out Init Hierarchical Tree (block=" << blockData->BlockIndex
<< ") : " << iterationTimer.GetElapsedTime() << " seconds" << std::endl;
iterationTimer.Start();
for (auto round = nRounds - 1; round > 0; round--)
{
iterationTimer.Start();
vtkm::worklet::contourtree_distributed::
TreeGrafter<vtkm::worklet::contourtree_augmented::ContourTreeMesh<FieldType>, FieldType>
grafter(&(blockData->ContourTreeMeshes[round - 1]),
blockData->ContourTrees[round],
&(blockData->InteriorForests[round]));
grafter.GraftInteriorForests(static_cast<vtkm::Id>(round),
hierarchicalTree,
blockData->ContourTreeMeshes[round - 1].SortedValues);
// save the corresponding .gv file
if (this->SaveDotFiles)
{
vtkm::filter::contourtree_distributed_detail::SaveHierarchicalTreeDot(
blockData, hierarchicalTree, rank, nRounds);
} // if(this->SaveDotFiles)
// Log the time for each of the iterations of the fan out loop
fanoutTimingsStream << " Fan Out Time (block=" << blockData->BlockIndex
<< " , round=" << round << ") : " << iterationTimer.GetElapsedTime()
<< " seconds" << std::endl;
} // for
// bottom level
iterationTimer.Start();
vtkm::worklet::contourtree_distributed::
TreeGrafter<vtkm::worklet::contourtree_augmented::DataSetMesh, FieldType>
grafter(&(this->LocalMeshes[static_cast<std::size_t>(blockData->BlockIndex)]),
blockData->ContourTrees[0],
&(blockData->InteriorForests[0]));
auto currBlock = input.GetPartition(blockData->BlockIndex);
auto currField =
currBlock.GetField(this->GetActiveFieldName(), this->GetActiveFieldAssociation());
vtkm::cont::ArrayHandle<FieldType> fieldData;
vtkm::cont::ArrayCopy(currField.GetData(), fieldData);
auto localToGlobalIdRelabeler = vtkm::worklet::contourtree_augmented::mesh_dem::IdRelabeler(
this->MultiBlockSpatialDecomposition.LocalBlockOrigins.ReadPortal().Get(
blockData->BlockIndex),
this->MultiBlockSpatialDecomposition.LocalBlockSizes.ReadPortal().Get(
blockData->BlockIndex),
this->MultiBlockSpatialDecomposition.GlobalSize);
grafter.GraftInteriorForests(0, hierarchicalTree, fieldData, &localToGlobalIdRelabeler);
// Log the time for each of the iterations of the fan out loop
fanoutTimingsStream << " Fan Out Time (block=" << blockData->BlockIndex << " , round=" << 0
<< ") : " << iterationTimer.GetElapsedTime() << " seconds" << std::endl;
fanoutTimingsStream << " Fan Out Time (block=" << blockData->LocalBlockNo
<< " , round=" << round << ") : " << iterationTimer.GetElapsedTime()
<< " seconds" << std::endl;
} // for
// bottom level
iterationTimer.Start();
vtkm::worklet::contourtree_distributed::
TreeGrafter<vtkm::worklet::contourtree_augmented::DataSetMesh, FieldType>
grafter(&(this->LocalMeshes[static_cast<std::size_t>(blockData->LocalBlockNo)]),
blockData->ContourTrees[0],
&(blockData->InteriorForests[0]));
auto currBlock = input.GetPartition(blockData->LocalBlockNo);
auto currField =
currBlock.GetField(this->GetActiveFieldName(), this->GetActiveFieldAssociation());
vtkm::cont::ArrayHandle<FieldType> fieldData;
vtkm::cont::ArrayCopy(currField.GetData(), fieldData);
auto localToGlobalIdRelabeler = vtkm::worklet::contourtree_augmented::mesh_dem::IdRelabeler(
this->MultiBlockSpatialDecomposition.LocalBlockOrigins.ReadPortal().Get(
blockData->LocalBlockNo),
this->MultiBlockSpatialDecomposition.LocalBlockSizes.ReadPortal().Get(
blockData->LocalBlockNo),
this->MultiBlockSpatialDecomposition.GlobalSize);
grafter.GraftInteriorForests(
0, blockData->HierarchicalTree, fieldData, &localToGlobalIdRelabeler);
// Log the time for each of the iterations of the fan out loop
fanoutTimingsStream << " Fan Out Time (block=" << blockData->LocalBlockNo << " , round=" << 0
<< ") : " << iterationTimer.GetElapsedTime() << " seconds" << std::endl;
// Log the timing stats we collected
VTKM_LOG_S(this->TimingsLogLevel,
std::endl
<< " ------------ Fan Out (block=" << blockData->LocalBlockNo
<< ") ------------" << std::endl
<< fanoutTimingsStream.str());
});
// 2.2 Log timings for fan out
timingsStream << " " << std::setw(38) << std::left << "Fan Out Foreach"
<< ": " << timer.GetElapsedTime() << " seconds" << std::endl;
timer.Start();
// ******** 3. Augment the hierarchical tree if requested ********
if (this->AugmentHierarchicalTree)
{
master.foreach (
[](DistributedContourTreeBlockData* blockData, const vtkmdiy::Master::ProxyWithLink&) {
blockData->HierarchicalAugmenter.Initialize(
blockData->GlobalBlockId, &blockData->HierarchicalTree, &blockData->AugmentedTree);
});
timingsStream << " " << std::setw(38) << std::left << "Initalize Hierarchical Trees"
<< ": " << timer.GetElapsedTime() << " seconds" << std::endl;
timer.Start();
vtkmdiy::reduce(
master,
assigner,
partners,
vtkm::worklet::contourtree_distributed::HierarchicalAugmenterFunctor<FieldType>{});
// Clear all swap data as it is no longer needed
master.foreach (
[](DistributedContourTreeBlockData* blockData, const vtkmdiy::Master::ProxyWithLink&) {
blockData->HierarchicalAugmenter.ReleaseSwapArrays();
});
timingsStream << " " << std::setw(38) << std::left << "Retrieve In Attachment Points"
<< ": " << timer.GetElapsedTime() << " seconds" << std::endl;
timer.Start();
master.foreach (
[](DistributedContourTreeBlockData* blockData, const vtkmdiy::Master::ProxyWithLink&) {
blockData->HierarchicalAugmenter.BuildAugmentedTree();
});
timingsStream << " " << std::setw(38) << std::left << "Build Augmented Tree"
<< ": " << timer.GetElapsedTime() << " seconds" << std::endl;
timer.Start();
}
// ******** 4. Create output data set ********
std::vector<vtkm::cont::DataSet> hierarchicalTreeOutputDataSet(master.size());
master.foreach (
[&](DistributedContourTreeBlockData* blockData, const vtkmdiy::Master::ProxyWithLink&) {
std::stringstream createOutdataTimingsStream;
vtkm::cont::Timer iterationTimer;
iterationTimer.Start();
// Use the augmented tree if available or otherwise use the unaugmented hierarchical tree from the current block
auto blockHierarchcialTree = this->AugmentHierarchicalTree
? (*blockData->HierarchicalAugmenter.AugmentedTree)
: blockData->HierarchicalTree;
// Create data set from output
vtkm::cont::Field dataValuesField(
"DataValues", vtkm::cont::Field::Association::WHOLE_MESH, hierarchicalTree.DataValues);
hierarchicalTreeOutputDataSet[blockData->BlockIndex].AddField(dataValuesField);
"DataValues", vtkm::cont::Field::Association::WHOLE_MESH, blockHierarchcialTree.DataValues);
hierarchicalTreeOutputDataSet[blockData->LocalBlockNo].AddField(dataValuesField);
vtkm::cont::Field regularNodeGlobalIdsField("RegularNodeGlobalIds",
vtkm::cont::Field::Association::WHOLE_MESH,
hierarchicalTree.RegularNodeGlobalIds);
hierarchicalTreeOutputDataSet[blockData->BlockIndex].AddField(regularNodeGlobalIdsField);
blockHierarchcialTree.RegularNodeGlobalIds);
hierarchicalTreeOutputDataSet[blockData->LocalBlockNo].AddField(regularNodeGlobalIdsField);
vtkm::cont::Field superarcsField(
"Superarcs", vtkm::cont::Field::Association::WHOLE_MESH, hierarchicalTree.Superarcs);
hierarchicalTreeOutputDataSet[blockData->BlockIndex].AddField(superarcsField);
"Superarcs", vtkm::cont::Field::Association::WHOLE_MESH, blockHierarchcialTree.Superarcs);
hierarchicalTreeOutputDataSet[blockData->LocalBlockNo].AddField(superarcsField);
vtkm::cont::Field supernodesField(
"Supernodes", vtkm::cont::Field::Association::WHOLE_MESH, hierarchicalTree.Supernodes);
hierarchicalTreeOutputDataSet[blockData->BlockIndex].AddField(supernodesField);
vtkm::cont::Field superparentsField(
"Superparents", vtkm::cont::Field::Association::WHOLE_MESH, hierarchicalTree.Superparents);
hierarchicalTreeOutputDataSet[blockData->BlockIndex].AddField(superparentsField);
"Supernodes", vtkm::cont::Field::Association::WHOLE_MESH, blockHierarchcialTree.Supernodes);
hierarchicalTreeOutputDataSet[blockData->LocalBlockNo].AddField(supernodesField);
vtkm::cont::Field superparentsField("Superparents",
vtkm::cont::Field::Association::WHOLE_MESH,
blockHierarchcialTree.Superparents);
hierarchicalTreeOutputDataSet[blockData->LocalBlockNo].AddField(superparentsField);
// Copy cell set from input data set. This is mainly to ensure that the output data set
// has a defined cell set. Without one, serialization for DIY does not work properly.
// Having the extents of the input data set may also help in other use cases.
hierarchicalTreeOutputDataSet[blockData->BlockIndex].SetCellSet(
input.GetPartition(blockData->BlockIndex).GetCellSet());
hierarchicalTreeOutputDataSet[blockData->LocalBlockNo].SetCellSet(
input.GetPartition(blockData->LocalBlockNo).GetCellSet());
// Log the time for each of the iterations of the fan out loop
fanoutTimingsStream << " Fan Out Create Output Dataset (block=" << blockData->BlockIndex
<< ") : " << iterationTimer.GetElapsedTime() << " seconds" << std::endl;
createOutdataTimingsStream << " Fan Out Create Output Dataset (block="
<< blockData->LocalBlockNo
<< ") : " << iterationTimer.GetElapsedTime() << " seconds"
<< std::endl;
iterationTimer.Start();
// save the corresponding .gv file
if (this->SaveDotFiles)
{
auto nRounds = blockData->ContourTrees.size() - 1;
vtkm::filter::contourtree_distributed_detail::SaveHierarchicalTreeDot(
blockData, hierarchicalTree, rank, nRounds);
blockData, rank, nRounds);
fanoutTimingsStream << " Fan Out Save Dot (block=" << blockData->BlockIndex
<< ") : " << iterationTimer.GetElapsedTime() << " seconds" << std::endl;
createOutdataTimingsStream << " Fan Out Save Dot (block=" << blockData->LocalBlockNo
<< ") : " << iterationTimer.GetElapsedTime() << " seconds"
<< std::endl;
iterationTimer.Start();
} // if(this->SaveDotFiles)
// Log the timing stats we collected
VTKM_LOG_S(this->TimingsLogLevel,
std::endl
<< " ------------ Fan Out (block=" << blockData->BlockIndex
<< " ------------ Create Output Data (block=" << blockData->LocalBlockNo
<< ") ------------" << std::endl
<< fanoutTimingsStream.str());
<< createOutdataTimingsStream.str());
// Log the stats from the hierarchical contour tree
VTKM_LOG_S(this->TreeLogLevel,
std::endl
<< " ------------ Hierarchical Tree Construction Stats ------------"
<< std::endl
<< std::setw(42) << std::left << " BlockIndex"
<< ": " << blockData->BlockIndex << std::endl
<< hierarchicalTree.PrintTreeStats() << std::endl);
<< std::setw(42) << std::left << " LocalBlockNo"
<< ": " << blockData->LocalBlockNo << std::endl
<< blockData->HierarchicalTree.PrintTreeStats() << std::endl);
}); // master.foreach
// Clean-up
for (auto block : localDataBlocks)
delete block;
// 2.2 Log timings for fan out
timingsStream << " " << std::setw(38) << std::left << "Fan Out Foreach"
// 4.1 Log the time for creating the output data
timingsStream << " " << std::setw(38) << std::left << "Create Output Data"
<< ": " << timer.GetElapsedTime() << " seconds" << std::endl;
timer.Start();
// BEGIN: CONSIDER MOVING TO SEPARATE FILTER
// ******** 5. Compute associated metric (volume) ********
if (this->AugmentHierarchicalTree)
{
using HyperSweepBlock = vtkm::worklet::contourtree_distributed::HyperSweepBlock<FieldType>;
vtkmdiy::Master hierarchical_hyper_sweep_master(comm,
1, // Use 1 thread, VTK-M will do the threading
-1, // All blocks in memory
0, // No create function
HyperSweepBlock::destroy);
// Copy data from hierarchical tree computation to initialize volume computation
master.foreach ([&](DistributedContourTreeBlockData* currInBlock,
const vtkmdiy::Master::ProxyWithLink&) {
vtkm::Id blockNo = currInBlock->LocalBlockNo;
// The block size and origin may be modified during the FanIn so we need to use the
// size and origin from the original decomposition instead of looking them up in currInBlock
auto currBlockSize =
this->MultiBlockSpatialDecomposition.LocalBlockSizes.ReadPortal().Get(blockNo);
auto currBlockOrigin =
this->MultiBlockSpatialDecomposition.LocalBlockOrigins.ReadPortal().Get(blockNo);
// NOTE: Use dummy link to make DIY happy. The dummy link is never used, since all
// communication is via RegularDecomposer, which sets up its own links. No need
// to keep the pointer, as DIY will "own" it and delete it when no longer needed.
// NOTE: Since we passed a "destroy" function to DIY master, it will own the local data
// blocks and delete them when done.
hierarchical_hyper_sweep_master.add(
currInBlock->GlobalBlockId,
new HyperSweepBlock(blockNo,
currInBlock->GlobalBlockId,
currBlockOrigin,
currBlockSize,
spatialDecomp.GlobalSize,
*currInBlock->HierarchicalAugmenter.AugmentedTree),
new vtkmdiy::Link());
});
vtkmdiy::fix_links(hierarchical_hyper_sweep_master, assigner);
hierarchical_hyper_sweep_master.foreach (
[](HyperSweepBlock* b, const vtkmdiy::Master::ProxyWithLink&) {
// Create HyperSweeper
#ifdef DEBUG_PRINT
VTKM_LOG_S(vtkm::cont::LogLevel::Info, "Block " << b->GlobalBlockId);
VTKM_LOG_S(vtkm::cont::LogLevel::Info,
b->HierarchicalContourTree.DebugPrint(
"Before initializing HierarchicalHyperSweeper", __FILE__, __LINE__));
#endif
vtkm::worklet::contourtree_distributed::HierarchicalHyperSweeper<vtkm::Id, FieldType>
hyperSweeper(
b->GlobalBlockId, b->HierarchicalContourTree, b->IntrinsicVolume, b->DependentVolume);
// Create mesh and initialize vertex counts
vtkm::worklet::contourtree_augmented::mesh_dem::IdRelabeler idRelabeler{ b->Origin,
b->Size,
b->GlobalSize };
if (b->GlobalSize[2] <= 1)
{
vtkm::worklet::contourtree_augmented::DataSetMeshTriangulation2DFreudenthal mesh(
vtkm::Id2{ b->Size[0], b->Size[1] });
hyperSweeper.InitializeIntrinsicVertexCount(
b->HierarchicalContourTree, mesh, idRelabeler, b->IntrinsicVolume);
}
else
{
// For getting owned vertices, it does not make a difference if we are using marching cubes or not.
vtkm::worklet::contourtree_augmented::DataSetMeshTriangulation3DFreudenthal mesh(b->Size);
hyperSweeper.InitializeIntrinsicVertexCount(
b->HierarchicalContourTree, mesh, idRelabeler, b->IntrinsicVolume);
}
#ifdef DEBUG_PRINT
VTKM_LOG_S(vtkm::cont::LogLevel::Info, "Block " << b->GlobalBlockId);
VTKM_LOG_S(vtkm::cont::LogLevel::Info,
b->HierarchicalContourTree.DebugPrint(
"After initializing intrinsic vertex count", __FILE__, __LINE__));
std::ostringstream volumeStream;
vtkm::worklet::contourtree_augmented::PrintHeader(b->IntrinsicVolume.GetNumberOfValues(),
volumeStream);
vtkm::worklet::contourtree_augmented::PrintIndices(
"Intrinsic Volume", b->IntrinsicVolume, -1, volumeStream);
vtkm::worklet::contourtree_augmented::PrintIndices(
"Dependent Volume", b->DependentVolume, -1, volumeStream);
VTKM_LOG_S(vtkm::cont::LogLevel::Info, volumeStream.str());
VTKM_LOG_S(vtkm::cont::LogLevel::Info, "FLUSH" << std::endl << std::flush);
#endif
// Initialize dependentVolume by copy from intrinsicVolume
vtkm::cont::Algorithm::Copy(b->IntrinsicVolume, b->DependentVolume);
// Perform the local hypersweep
hyperSweeper.LocalHyperSweep();
#ifdef DEBUG_PRINT
VTKM_LOG_S(vtkm::cont::LogLevel::Info, "Block " << b->GlobalBlockId);
VTKM_LOG_S(
vtkm::cont::LogLevel::Info,
b->HierarchicalContourTree.DebugPrint("After local hypersweep", __FILE__, __LINE__));
#endif
});
// Reduce
// partners for merge over regular block grid
vtkmdiy::reduce(
hierarchical_hyper_sweep_master,
assigner,
partners,
vtkm::worklet::contourtree_distributed::CobmineHyperSweepBlockFunctor<FieldType>{});
// Print & add to output data set
//std::vector<vtkm::cont::DataSet> hierarchicalTreeAndVolumeOutputDataSet(localDataBlocks.size());
hierarchical_hyper_sweep_master.foreach (
[&](HyperSweepBlock* b, const vtkmdiy::Master::ProxyWithLink&) {
vtkm::cont::Field intrinsicVolumeField(
"IntrinsicVolume", vtkm::cont::Field::Association::WHOLE_MESH, b->IntrinsicVolume);
hierarchicalTreeOutputDataSet[b->LocalBlockNo].AddField(intrinsicVolumeField);
vtkm::cont::Field dependentVolumeField(
"DependentVolume", vtkm::cont::Field::Association::WHOLE_MESH, b->DependentVolume);
hierarchicalTreeOutputDataSet[b->LocalBlockNo].AddField(dependentVolumeField);
#ifdef DEBUG_PRINT
VTKM_LOG_S(vtkm::cont::LogLevel::Info, "Block " << b->GlobalBlockId);
VTKM_LOG_S(
vtkm::cont::LogLevel::Info,
b->HierarchicalContourTree.DebugPrint("Called from DumpVolumes", __FILE__, __LINE__));
std::ostringstream volumeStream;
vtkm::worklet::contourtree_augmented::PrintHeader(b->IntrinsicVolume.GetNumberOfValues(),
volumeStream);
vtkm::worklet::contourtree_augmented::PrintIndices(
"Intrinsic Volume", b->IntrinsicVolume, -1, volumeStream);
vtkm::worklet::contourtree_augmented::PrintIndices(
"Dependent Volume", b->DependentVolume, -1, volumeStream);
VTKM_LOG_S(vtkm::cont::LogLevel::Info, volumeStream.str());
#endif
});
} // end if(this->AugmentHierarchicalTree)
// END: THIS SHOULD GO INTO A SEPARATE FILTER
VTKM_LOG_S(this->TimingsLogLevel,
std::endl

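A note on the volume metric computed in the hypersweep block above: the IntrinsicVolume of a superarc counts the regular vertices that map directly onto it, while the DependentVolume also accumulates everything hanging below it in the tree, which is why the code seeds DependentVolume with a copy of IntrinsicVolume before sweeping. A serial sketch of that recurrence on a plain parent-pointer tree (a simplification for illustration, not the parallel HierarchicalHyperSweeper that operates on hyperarcs):

// Serial sketch: dependent[v] = intrinsic[v] + sum of dependent[c] over all children c.
// Assumes 'parent' encodes a rooted tree with parent[root] == -1.
#include <cstddef>
#include <vector>

std::vector<long long> DependentVolumes(const std::vector<int>& parent,
                                        const std::vector<long long>& intrinsic)
{
  std::vector<long long> dependent(intrinsic); // seed with intrinsic counts, as the code above does
  std::vector<int> childCount(parent.size(), 0);
  for (std::size_t v = 0; v < parent.size(); ++v)
    if (parent[v] >= 0)
      ++childCount[parent[v]];
  // Sweep from the leaves upward: a vertex is processed once all of its children are done.
  std::vector<std::size_t> ready;
  for (std::size_t v = 0; v < parent.size(); ++v)
    if (childCount[v] == 0)
      ready.push_back(v);
  while (!ready.empty())
  {
    std::size_t v = ready.back();
    ready.pop_back();
    if (parent[v] >= 0)
    {
      dependent[parent[v]] += dependent[v]; // push this subtree's volume up to the parent
      if (--childCount[parent[v]] == 0)
        ready.push_back(static_cast<std::size_t>(parent[v]));
    }
  }
  return dependent;
}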
@ -293,6 +293,7 @@ inline vtkm::cont::PartitionedDataSet RunContourTreeDUniformDistributed(
!useMarchingCubes,
useMarchingCubes,
false,
false,
vtkm::cont::LogLevel::UserVerboseLast,
vtkm::cont::LogLevel::UserVerboseLast);
filter.SetActiveField(fieldName);

@ -16,7 +16,7 @@ set(headers
DataSetMesh.h
MergeTree.h
MeshExtrema.h
PointerDoubling.h
NotNoSuchElementPredicate.h
PrintVectors.h
ProcessContourTree.h
Types.h

@ -253,6 +253,7 @@ inline void ContourTree::PrintDotSuperStructure() const
printf("digraph G\n\t{\n");
printf("\tsize=\"6.5, 9\"\n\tratio=\"fill\"\n");
// We use regular ReadPortal here since we need access to most values on the host anyways
auto whenTransferredPortal = this->WhenTransferred.ReadPortal();
auto supernodesPortal = this->Supernodes.ReadPortal();
auto superarcsPortal = this->Superarcs.ReadPortal();
@ -341,6 +342,7 @@ inline std::string ContourTree::PrintHyperStructureStatistics(bool print) const
std::vector<vtkm::Id> maxPath;
std::vector<vtkm::Id> supernodeCount;
std::vector<vtkm::Id> hypernodeCount;
// We use regular ReadPortal here since we need access to all values anyways
auto whenTransferredPortal = this->WhenTransferred.ReadPortal();
auto hypernodesPortal = this->Hypernodes.ReadPortal();

@ -78,8 +78,10 @@
#include <vtkm/cont/ArrayHandlePermutation.h>
#include <vtkm/cont/Invoker.h>
#include <vtkm/worklet/contourtree_augmented/NotNoSuchElementPredicate.h>
#include <vtkm/worklet/contourtree_augmented/PrintVectors.h>
#include <vtkm/worklet/contourtree_augmented/Types.h>
#include <vtkm/worklet/contourtree_augmented/data_set_mesh/GetOwnedVerticesByGlobalIdWorklet.h>
#include <vtkm/worklet/contourtree_augmented/data_set_mesh/IdRelabeler.h>
#include <vtkm/worklet/contourtree_augmented/data_set_mesh/SimulatedSimplicityComperator.h>
#include <vtkm/worklet/contourtree_augmented/data_set_mesh/SortIndices.h>
@ -185,11 +187,63 @@ public:
void DebugPrint(const char* message, const char* fileName, long lineNum);
protected:
//TODO/FIXME: Update comment, possibly refactor and move somewhere else (helper function outside class?)
///Compute a list of the global IDs of all vertices that logically belong to the data block represented by this
///mesh object (used in distributed parallel computation). This is needed to avoid multiple counting on boundaries
///in the hierarchy during distributed parallel contour tree computation.
/// Implementation of GetOwnedVerticesByGlobalId used internally by derived classes to
/// implement the specific variant of the function. The implementations vary based on the
/// MeshBoundary object used, so derived classes just need to specify their mesh
/// boundary object and then call this function.
/// @param[in] mesh For derived meshes, simply pass this. Derived meshes also inherit from ExecutionObjectBase
/// and as such have PrepareForExecution functions that return a MeshBoundary object that
/// we can use here. We pass the mesh in because the base DataSetMesh class does
/// not know about MeshBoundary classes.
/// @param[out] ownedVertices List of vertices that logically belong to the data block represented by this mesh
template <typename MeshTypeObj>
void GetOwnedVerticesByGlobalIdImpl(
const MeshTypeObj* mesh,
const vtkm::worklet::contourtree_augmented::mesh_dem::IdRelabeler& localToGlobalIdRelabeler,
IdArrayType& ownedVertices) const;
virtual void DebugPrintExtends();
template <typename T, typename StorageType>
void DebugPrintValues(const vtkm::cont::ArrayHandle<T, StorageType>& values);
}; // class DataSetMesh
// Implementation of GetOwnedVerticesByGlobalId used by subclasses
template <typename MeshTypeObj>
void DataSetMesh::GetOwnedVerticesByGlobalIdImpl(
const MeshTypeObj* mesh,
const vtkm::worklet::contourtree_augmented::mesh_dem::IdRelabeler& localToGlobalIdRelabeler,
IdArrayType& ownedVertices) const
{
// Use a temporary array since we need to compress it at the end via CopyIf so we
// can then move the values to keep into the ownedVertices output array
IdArrayType tempOwnedVertices;
// Fancy array for the running mesh index
vtkm::cont::ArrayHandleIndex meshIndexArray(this->GetNumberOfVertices());
auto ownedVerticesWorklet =
vtkm::worklet::contourtree_augmented::data_set_mesh::GetOwnedVerticesByGlobalIdWorklet(
localToGlobalIdRelabeler);
vtkm::cont::Invoker invoke;
invoke(ownedVerticesWorklet, // worklet to run
meshIndexArray, // input mesh index to map
mesh, // input the mesh object
tempOwnedVertices // output
);
// now compress out the NO_SUCH_ELEMENT ones
vtkm::worklet::contourtree_augmented::NotNoSuchElementPredicate notNoSuchElementPredicate;
// compress the array
vtkm::cont::Algorithm::CopyIf(
tempOwnedVertices, // compress the array of owned vertices
tempOwnedVertices, // stencil. Same as input. Values to remove have NO_SUCH_ELEMENT flag
ownedVertices, // array where the compressed ownedVertices are stored
notNoSuchElementPredicate // unary predicate for deciding which nodes are considered true. Here those that do not have a NO_SUCH_ELEMENT flag.
);
}
// Sorts the data and initialises the SortIndices & SortOrder
template <typename T, typename StorageType>
inline void DataSetMesh::SortData(const vtkm::cont::ArrayHandle<T, StorageType>& values)

@ -240,15 +240,15 @@ inline void MergeTree::DebugPrintTree(const char* message,
for (vtkm::Id entry = 0; entry < mesh.NumVertices; entry++)
{
vtkm::Id sortIndex = mesh.SortIndices.ReadPortal().Get(entry);
vtkm::Id arc = this->Arcs.ReadPortal().Get(sortIndex);
vtkm::Id sortIndex = vtkm::cont::ArrayGetValue(entry, mesh.SortIndices);
vtkm::Id arc = vtkm::cont::ArrayGetValue(sortIndex, this->Arcs);
if (NoSuchElement(arc))
{
std::cout << "-1" << std::endl;
}
else
{
std::cout << mesh.SortOrder.ReadPortal().Get(arc) << std::endl;
std::cout << vtkm::cont::ArrayGetValue(arc, mesh.SortOrder) << std::endl;
}
if (mesh.MeshSize[2] == 1)
{ // 2D Mesh

@ -59,9 +59,9 @@
#include <vtkm/cont/Algorithm.h>
#include <vtkm/cont/ArrayHandleConstant.h>
#include <vtkm/cont/Invoker.h>
#include <vtkm/worklet/contourtree_augmented/PointerDoubling.h>
#include <vtkm/worklet/contourtree_augmented/PrintVectors.h>
#include <vtkm/worklet/contourtree_augmented/Types.h>
#include <vtkm/worklet/contourtree_augmented/meshextrema/PointerDoubling.h>
#include <vtkm/worklet/contourtree_augmented/meshextrema/SetStarts.h>
@ -134,7 +134,7 @@ inline void MeshExtrema::BuildRegularChains(bool isMaximal)
IdArrayType& extrema = isMaximal ? Peaks : Pits;
// Create the PointerDoubling worklet and corresponding dispatcher
vtkm::worklet::contourtree_augmented::PointerDoubling pointerDoubler;
vtkm::worklet::contourtree_augmented::mesh_extrema_inc::PointerDoubling pointerDoubler;
// Iterate to perform pointer-doubling to build chains to extrema (i.e., maxima or minima)
// depending on whether we are computing a JoinTree or a SplitTree

@ -0,0 +1,82 @@
//============================================================================
// Copyright (c) Kitware, Inc.
// All rights reserved.
// See LICENSE.txt for details.
//
// This software is distributed WITHOUT ANY WARRANTY; without even
// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the above copyright notice for more information.
//============================================================================
// Copyright (c) 2018, The Regents of the University of California, through
// Lawrence Berkeley National Laboratory (subject to receipt of any required approvals
// from the U.S. Dept. of Energy). All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// (1) Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// (2) Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// (3) Neither the name of the University of California, Lawrence Berkeley National
// Laboratory, U.S. Dept. of Energy nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
// IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
//
//=============================================================================
//
// This code is an extension of the algorithm presented in the paper:
// Parallel Peak Pruning for Scalable SMP Contour Tree Computation.
// Hamish Carr, Gunther Weber, Christopher Sewell, and James Ahrens.
// Proceedings of the IEEE Symposium on Large Data Analysis and Visualization
// (LDAV), October 2016, Baltimore, Maryland.
//
// The PPP2 algorithm and software were jointly developed by
// Hamish Carr (University of Leeds), Gunther H. Weber (LBNL), and
// Oliver Ruebel (LBNL)
//==============================================================================
#ifndef vtk_m_worklet_contourtree_augmented_not_no_such_element_predicate_h
#define vtk_m_worklet_contourtree_augmented_not_no_such_element_predicate_h
#include <vtkm/worklet/contourtree_augmented/Types.h>
namespace vtkm
{
namespace worklet
{
namespace contourtree_augmented
{
// Simple unary predicate returning true if an Id does not carry the NO_SUCH_ELEMENT flag; used, e.g., with CopyIf to subset a VTKm ArrayHandle
class NotNoSuchElementPredicate
{
public:
VTKM_EXEC_CONT
NotNoSuchElementPredicate() {}
VTKM_EXEC_CONT
bool operator()(const vtkm::Id& vertexId) const { return !NoSuchElement(vertexId); }
private:
};
} // namespace contourtree_augmented
} // namespace worklet
} // namespace vtkm
#endif
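// Illustrative usage sketch (hypothetical array and function names; assumes the
// includes above plus <vtkm/cont/Algorithm.h>): compress an Id array with CopyIf so
// that only entries without the NO_SUCH_ELEMENT flag survive. The data array doubles
// as its own stencil, mirroring how the predicate is used in
// DataSetMesh::GetOwnedVerticesByGlobalIdImpl.
inline void CompressOutNoSuchElementSketch(
  const vtkm::worklet::contourtree_augmented::IdArrayType& candidates,
  vtkm::worklet::contourtree_augmented::IdArrayType& kept)
{
  vtkm::worklet::contourtree_augmented::NotNoSuchElementPredicate notNoSuchElement;
  vtkm::cont::Algorithm::CopyIf(candidates, // values to compress
                                candidates, // stencil (same array)
                                kept,       // compressed output
                                notNoSuchElement);
}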

@ -128,6 +128,13 @@ inline void PrintSeparatingBar(vtkm::Id howMany, std::ostream& outStream = std::
} // PrintSeparatingBar()
// routine to print out a single index
inline void PrintIndexType(vtkm::Id index, std::ostream& outStream = std::cout)
{ // PrintIndexType
outStream << std::setw(PRINT_WIDTH - 6) << MaskedIndex(index) << " " << FlagString(index);
} // PrintIndexType
// routine to print out a single value
template <typename T>
inline void PrintDataType(T value, std::ostream& outStream = std::cout)
@ -136,11 +143,13 @@ inline void PrintDataType(T value, std::ostream& outStream = std::cout)
} // PrintDataType
// routine to print out a single index
inline void PrintIndexType(vtkm::Id index, std::ostream& outStream = std::cout)
{ // PrintIndexType
outStream << std::setw(PRINT_WIDTH - 6) << MaskedIndex(index) << " " << FlagString(index);
} // PrintIndexType
// Specialization of PrintDataType for vtkm::Id to use PrintIndexType instead so we can properly
// print Id arrays using the PrintArrayHandle function, e.g., to print permuted Id arrays.
template <>
inline void PrintDataType<vtkm::Id>(vtkm::Id value, std::ostream& outStream)
{
PrintIndexType(value, outStream);
}
// header line
@ -162,6 +171,32 @@ inline void PrintHeader(vtkm::Id howMany, std::ostream& outStream = std::cout)
} // PrintHeader()
// base routines for reading & writing host vectors
template <typename ARRAYTYPE>
inline void PrintArrayHandle(std::string label,
const ARRAYTYPE& dVec,
vtkm::Id nValues,
std::ostream& outStream)
{ // PrintArrayHandle()
// -1 means full size
if (nValues == -1)
{
nValues = dVec.GetNumberOfValues();
}
// print the label
PrintLabel(label, outStream);
// now print the data
auto portal = dVec.ReadPortal();
for (vtkm::Id entry = 0; entry < nValues; entry++)
{
PrintDataType(portal.Get(entry), outStream);
}
// and an std::endl
outStream << std::endl;
} // PrintArrayHandle()
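// Illustrative usage sketch (hypothetical data and function name): print a small Id
// array, including a flagged NO_SUCH_ELEMENT entry, via the vtkm::Id specialization
// above. Passing -1 for nValues prints the full array.
inline void PrintArrayHandleSketch()
{
  vtkm::cont::ArrayHandle<vtkm::Id> arcs;
  arcs.Allocate(4);
  auto writePortal = arcs.WritePortal();
  writePortal.Set(0, 0);
  writePortal.Set(1, 3);
  writePortal.Set(2, NO_SUCH_ELEMENT); // printed with its flag string by PrintIndexType
  writePortal.Set(3, 2);
  PrintHeader(arcs.GetNumberOfValues());
  PrintArrayHandle("Arcs", arcs, -1, std::cout);
}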
// base routines for reading & writing host vectors
template <typename T, typename StorageType>
inline void PrintValues(std::string label,

@ -55,7 +55,9 @@
#define vtk_m_worklet_contourtree_augmented_types_h
#include <vtkm/Types.h>
#include <vtkm/cont/Algorithm.h>
#include <vtkm/cont/ArrayHandle.h>
#include <vtkm/cont/ArrayHandleConstant.h>
#include <vtkm/cont/CellSetStructured.h>
namespace vtkm
@ -126,18 +128,61 @@ inline vtkm::Id MaskedIndex(vtkm::Id flaggedIndex)
return (flaggedIndex & INDEX_MASK);
} // MaskedIndex()
// Used in the context of CombinedVector class used in ContourTreeMesh to merge the mesh of contour trees
/// Used in the context of CombinedVector class used in ContourTreeMesh to merge the mesh of contour trees
VTKM_EXEC_CONT
inline bool IsThis(vtkm::Id flaggedIndex)
{ // IsThis
return ((flaggedIndex & CV_OTHER_FLAG) == 0);
} // IsThis
/// Helper function to set a single array value with CopySubRange to avoid pulling the array into the control environment
VTKM_CONT
inline void IdArraySetValue(vtkm::Id index, vtkm::Id value, IdArrayType& arr)
{ // IdArraySetValue
vtkm::cont::Algorithm::CopySubRange(
vtkm::cont::ArrayHandleConstant<vtkm::Id>(value, 1), 0, 1, arr, index);
} // IdArraySetValue
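// Illustrative usage sketch (hypothetical values and function name): overwrite one
// entry of an IdArrayType entirely on the device, without a WritePortal round trip
// through the control environment.
inline void IdArraySetValueSketch()
{
  // fill a 10-element array with zeros, then set entry 5 to 42
  IdArrayType counts;
  vtkm::cont::Algorithm::Copy(vtkm::cont::ArrayHandleConstant<vtkm::Id>(0, 10), counts);
  IdArraySetValue(5, 42, counts);
}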
/// Helper function used to resize a 1D ArrayHandle and initialize new values with a
/// given fillValue. For resizing ArrayHandles without initializing the new values, VTKm
/// supports the vtkm::CopyFlag::On setting as part of the ArrayHandle.Allocate
/// method.
/// @param[in] thearray The 1D array to be resized
/// @param[in] newSize The new size the array should be changed to
/// @param[in] fillValue The value to be used to fill the array
template <typename ValueType>
void ResizeVector(vtkm::cont::ArrayHandle<ValueType>& thearray,
vtkm::Id newSize,
ValueType fillValue)
{
vtkm::Id oldSize = thearray.GetNumberOfValues();
// Simply return if the size of the array does not change
if (oldSize == newSize)
{
return;
}
// Resize the array but keep the original values
thearray.Allocate(newSize, vtkm::CopyFlag::On);
// Add the fill values to the array if we increased the size of the array
if (oldSize < newSize)
{
vtkm::cont::Algorithm::CopySubRange(
vtkm::cont::ArrayHandleConstant<ValueType>(fillValue, newSize - oldSize), // copy
0, // start copying from first index
newSize - oldSize, // num values to copy
thearray, // target array to copy to
oldSize // start copy to after oldSize
);
}
}
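// Illustrative usage sketch (hypothetical values and function name): grow an Id array
// from 5 to 8 entries, keeping the existing values and initializing the new tail with
// NO_SUCH_ELEMENT. Shrinking works the same way; the fillValue is then simply unused.
inline void ResizeVectorSketch()
{
  IdArrayType supernodeIds;
  vtkm::cont::Algorithm::Copy(vtkm::cont::ArrayHandleConstant<vtkm::Id>(7, 5), supernodeIds);
  ResizeVector<vtkm::Id>(supernodeIds, 8, NO_SUCH_ELEMENT);
  // supernodeIds now holds { 7, 7, 7, 7, 7, NO_SUCH_ELEMENT, NO_SUCH_ELEMENT, NO_SUCH_ELEMENT }
}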
template <typename T>
struct MaskedIndexFunctor
{
VTKM_EXEC_CONT
MaskedIndexFunctor() {}
VTKM_EXEC_CONT

@ -8,11 +8,12 @@
## PURPOSE. See the above copyright notice for more information.
##============================================================================
set(headers
GetOwnedVerticesByGlobalIdWorklet.h
IdRelabeler.h
MeshStructure2D.h
MeshStructure3D.h
SimulatedSimplicityComperator.h
SortIndices.h
IdRelabeler.h
)
#-----------------------------------------------------------------------------

@ -0,0 +1,105 @@
//============================================================================
// Copyright (c) Kitware, Inc.
// All rights reserved.
// See LICENSE.txt for details.
//
// This software is distributed WITHOUT ANY WARRANTY; without even
// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the above copyright notice for more information.
//============================================================================
// Copyright (c) 2018, The Regents of the University of California, through
// Lawrence Berkeley National Laboratory (subject to receipt of any required approvals
// from the U.S. Dept. of Energy). All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// (1) Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// (2) Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// (3) Neither the name of the University of California, Lawrence Berkeley National
// Laboratory, U.S. Dept. of Energy nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
// IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
//
//=============================================================================
//
// This code is an extension of the algorithm presented in the paper:
// Parallel Peak Pruning for Scalable SMP Contour Tree Computation.
// Hamish Carr, Gunther Weber, Christopher Sewell, and James Ahrens.
// Proceedings of the IEEE Symposium on Large Data Analysis and Visualization
// (LDAV), October 2016, Baltimore, Maryland.
//
// The PPP2 algorithm and software were jointly developed by
// Hamish Carr (University of Leeds), Gunther H. Weber (LBNL), and
// Oliver Ruebel (LBNL)
//==============================================================================
#ifndef vtk_m_worklet_contourtree_augmented_mesh_dem_get_owned_vertices_by_global_id_worklet_h
#define vtk_m_worklet_contourtree_augmented_mesh_dem_get_owned_vertices_by_global_id_worklet_h
#include <vtkm/worklet/WorkletMapField.h>
#include <vtkm/worklet/contourtree_augmented/data_set_mesh/IdRelabeler.h>
namespace vtkm
{
namespace worklet
{
namespace contourtree_augmented
{
namespace data_set_mesh
{
// Worklet for computing the list of vertices owned by a data block, identified via their global mesh Ids
class GetOwnedVerticesByGlobalIdWorklet : public vtkm::worklet::WorkletMapField
{
public:
using ControlSignature = void(FieldIn meshIndices, // (input) index into active vertices
ExecObject meshStructure, // (input) mesh structure execution object
FieldOut ownedVertices // (output) vertices owned by the mesh
);
using ExecutionSignature = _3(_1, _2);
using InputDomain = _1;
// Constructor
VTKM_EXEC_CONT
GetOwnedVerticesByGlobalIdWorklet(
const vtkm::worklet::contourtree_augmented::mesh_dem::IdRelabeler& localToGlobalIdRelabeler)
: LocalToGlobalIdRelabeler(localToGlobalIdRelabeler)
{
}
/// Functor returning NO_SUCH_ELEMENT if the vertex is not owned, or the global mesh index if the vertex is owned
/// The functor simply calls the GetVertexOwned function of the meshStructure for each vertex
template <typename MeshStructureType>
VTKM_EXEC vtkm::Id operator()(const vtkm::Id meshIndex,
const MeshStructureType& meshStructure) const
{
return meshStructure.GetVertexOwned(meshIndex, this->LocalToGlobalIdRelabeler);
}
private:
const vtkm::worklet::contourtree_augmented::mesh_dem::IdRelabeler LocalToGlobalIdRelabeler;
}; // GetOwnedVerticesByGlobalIdWorklet
} // namespace data_set_mesh
} // namespace contourtree_augmented
} // namespace worklet
} // namespace vtkm
#endif

@ -110,9 +110,11 @@ public:
return (pos[2] * this->GlobalSize[1] + pos[1]) * this->GlobalSize[0] + pos[0];
}
private:
/// Local block origin
vtkm::Id3 LocalBlockOrigin;
/// Local block size
vtkm::Id3 LocalBlockSize;
/// Global mesh size
vtkm::Id3 GlobalSize;
};

@ -54,6 +54,7 @@
#define vtk_m_worklet_contourtree_augmented_data_set_mesh_execution_object_mesh_2d_h
#include <vtkm/Types.h>
#include <vtkm/worklet/contourtree_augmented/data_set_mesh/IdRelabeler.h>
namespace vtkm
{
@ -80,20 +81,48 @@ public:
{
}
// number of mesh vertices
/// Get the number of mesh vertices
VTKM_EXEC_CONT
vtkm::Id GetNumberOfVertices() const { return (this->MeshSize[0] * this->MeshSize[1]); }
/// Get the (x,y) position of the vertex based on its index
VTKM_EXEC
inline vtkm::Id2 VertexPos(vtkm::Id v) const
{
return vtkm::Id2{ v % this->MeshSize[0], v / this->MeshSize[0] };
}
//vertex ID - row * ncols + col
///vertex ID - row * ncols + col
VTKM_EXEC
inline vtkm::Id VertexId(vtkm::Id2 pos) const { return pos[1] * this->MeshSize[0] + pos[0]; }
/// Determine whether the vertex is owned by this mesh block or not
/// The function returns NO_SUCH_ELEMENT if the vertex is not owned by the block and
/// otherwise it returns the global id of the vertex as determined via the IdRelabeler
VTKM_EXEC_CONT
inline vtkm::Id GetVertexOwned(const vtkm::Id& meshIndex,
const vtkm::worklet::contourtree_augmented::mesh_dem::IdRelabeler&
localToGlobalIdRelabeler) const
{
// Get the vertex position
vtkm::Id2 pos = this->VertexPos(meshIndex);
// now test - the low ID boundary belongs to this block
// the high ID boundary belongs to the next block if there is one
if (((pos[1] == this->MeshSize[1] - 1) &&
(pos[1] + localToGlobalIdRelabeler.LocalBlockOrigin[1] !=
localToGlobalIdRelabeler.GlobalSize[1] - 1)) ||
((pos[0] == this->MeshSize[0] - 1) &&
(pos[0] + localToGlobalIdRelabeler.LocalBlockOrigin[0] !=
localToGlobalIdRelabeler.GlobalSize[0] - 1)))
{
return vtkm::worklet::contourtree_augmented::NO_SUCH_ELEMENT;
}
else
{
return localToGlobalIdRelabeler(meshIndex);
}
}
vtkm::Id2 MeshSize;
}; // MeshStructure2D
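// Illustrative worked example (hypothetical decomposition and function name): the
// ownership test along a single axis. Take a row of 7 global vertices split into two
// blocks of 4 vertices that share global column 3 (left block origin 0, right block
// origin 3). The shared column is the high boundary of the left block but not the
// global high boundary, so the left block does not own it; it is the low boundary of
// the right block, which does own it. The global high boundary (column 6) is owned by
// the right block. The 3D mesh structure applies the same per-axis test additionally in z.
inline bool IsOwnedAlongAxisSketch(vtkm::Id pos,
                                   vtkm::Id meshSize,
                                   vtkm::Id blockOrigin,
                                   vtkm::Id globalSize)
{
  return !((pos == meshSize - 1) && (pos + blockOrigin != globalSize - 1));
}
// IsOwnedAlongAxisSketch(3, 4, 0, 7) == false : left block, shared column 3
// IsOwnedAlongAxisSketch(0, 4, 3, 7) == true  : right block, same shared column (its low boundary)
// IsOwnedAlongAxisSketch(3, 4, 3, 7) == true  : right block, global high boundary (column 6)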

@ -104,6 +104,36 @@ public:
return (pos[2] * this->MeshSize[1] + pos[1]) * this->MeshSize[0] + pos[0];
}
/// Determine whether the vertex is owned by this mesh block or not
/// The function returns NO_SUCH_ELEMENT if the vertex is not owned by the block and
/// otherwise it returns the global id of the vertex as determined via the IdRelabeler
VTKM_EXEC_CONT
inline vtkm::Id GetVertexOwned(const vtkm::Id& meshIndex,
const vtkm::worklet::contourtree_augmented::mesh_dem::IdRelabeler&
localToGlobalIdRelabeler) const
{
// Get the vertex position
vtkm::Id3 pos = this->VertexPos(meshIndex);
// now test - the low ID boundary belongs to this block
// the high ID boundary belongs to the next block if there is one
if (((pos[1] == this->MeshSize[1] - 1) &&
(pos[1] + localToGlobalIdRelabeler.LocalBlockOrigin[1] !=
localToGlobalIdRelabeler.GlobalSize[1] - 1)) ||
((pos[0] == this->MeshSize[0] - 1) &&
(pos[0] + localToGlobalIdRelabeler.LocalBlockOrigin[0] !=
localToGlobalIdRelabeler.GlobalSize[0] - 1)) ||
((pos[2] == this->MeshSize[2] - 1) &&
(pos[2] + localToGlobalIdRelabeler.LocalBlockOrigin[2] !=
localToGlobalIdRelabeler.GlobalSize[2] - 1)))
{
return vtkm::worklet::contourtree_augmented::NO_SUCH_ELEMENT;
}
else
{
return localToGlobalIdRelabeler(meshIndex);
}
}
vtkm::Id3 MeshSize;
}; // MeshStructure3D

@ -9,7 +9,8 @@
##============================================================================
set(headers
SetStarts.h
)
PointerDoubling.h
)
#-----------------------------------------------------------------------------
vtkm_declare_headers(${headers})

@ -63,6 +63,8 @@ namespace worklet
{
namespace contourtree_augmented
{
namespace mesh_extrema_inc
{
// Functor for doing chain doubling
// Unary because it takes the index of the element to process, and is not guaranteed to
@ -94,6 +96,7 @@ public:
}
}; // PointerDoubling
} // namespace mesh_extrema_inc
} // namespace contourtree_augmented
} // namespace worklet
} // namespace vtkm

@ -70,30 +70,47 @@ namespace worklet
namespace contourtree_augmented
{
/// Class representing a 2D dataset mesh with freudenthal triangulation connectivity for contour tree computation
class DataSetMeshTriangulation2DFreudenthal
: public DataSetMesh
, public vtkm::cont::ExecutionObjectBase
{ // class DataSetMeshTriangulation
public:
// Constants and case tables
/// Constants and case tables
m2d_freudenthal::EdgeBoundaryDetectionMasksType EdgeBoundaryDetectionMasks;
static constexpr int MAX_OUTDEGREE = 3;
//Mesh dependent helper functions
void SetPrepareForExecutionBehavior(bool getMax);
/// Prepare mesh for use in VTKm worklets. This function creates a MeshStructureFreudenthal2D
/// ExecutionObject that implements relevant mesh functions on the device.
MeshStructureFreudenthal2D PrepareForExecution(vtkm::cont::DeviceAdapterId device,
vtkm::cont::Token& token) const;
/// Constructor
/// @param meshSize vtkm::Id2 object describing the number of vertices in x and y
DataSetMeshTriangulation2DFreudenthal(vtkm::Id2 meshSize);
/// Helper function to create a boundary execution object for the mesh. The MeshBoundary2DExec object
/// implements functions for use in worklets in VTKm's execution environment related to the boundary
/// of the mesh.
MeshBoundary2DExec GetMeshBoundaryExecutionObject() const;
void GetBoundaryVertices(IdArrayType& boundaryVertexArray, // output
IdArrayType& boundarySortIndexArray, // output
MeshBoundary2DExec* meshBoundaryExecObj =
NULL // optional input, included for consistency with ContourTreeMesh
) const;
/// Get boundary vertices
/// @param[out] boundaryVertexArray Array of boundary vertices
/// @param[out] boundarySortIndexArray Array of sort indices of the boundary vertices
/// @param[in] meshBoundaryExecObj Optional mesh boundary object included for consistency with ContourTreeMesh.
/// If omitted, GetMeshBoundaryExecutionObject() will be used.
void GetBoundaryVertices(IdArrayType& boundaryVertexArray,
IdArrayType& boundarySortIndexArray,
MeshBoundary2DExec* meshBoundaryExecObj = NULL) const;
/// Get the global indices of the vertices owned by this mesh. Implemented via
/// DataSetMesh.GetOwnedVerticesByGlobalIdImpl
void GetOwnedVerticesByGlobalId(
const vtkm::worklet::contourtree_augmented::mesh_dem::IdRelabeler& localToGlobalIdRelabeler,
IdArrayType& ownedVertices) const;
private:
bool UseGetMax; // Define the behavior for the PrepareForExecution function
@ -157,6 +174,14 @@ inline void DataSetMeshTriangulation2DFreudenthal::GetBoundaryVertices(
);
}
// Overwrite the implementation from the base DataSetMesh parent class
inline void DataSetMeshTriangulation2DFreudenthal::GetOwnedVerticesByGlobalId(
const vtkm::worklet::contourtree_augmented::mesh_dem::IdRelabeler& localToGlobalIdRelabeler,
IdArrayType& ownedVertices) const
{
return this->GetOwnedVerticesByGlobalIdImpl(this, localToGlobalIdRelabeler, ownedVertices);
}
} // namespace contourtree_augmented
} // worklet
} // vtkm

@ -100,6 +100,12 @@ public:
NULL // optional input, included for consistency with ContourTreeMesh
) const;
/// Get the global indices of the vertices owned by this mesh. Implemented via
/// DataSetMesh.GetOwnedVerticesByGlobalIdImpl.
void GetOwnedVerticesByGlobalId(
const vtkm::worklet::contourtree_augmented::mesh_dem::IdRelabeler& localToGlobalIdRelabeler,
IdArrayType& ownedVertices) const;
private:
bool UseGetMax; // Define the behavior for the PrepareForExecution function
}; // class DataSetMeshTriangulation
@ -173,6 +179,14 @@ inline void DataSetMeshTriangulation3DFreudenthal::GetBoundaryVertices(
);
}
// Overwrite the implementation from the base DataSetMesh parent class
inline void DataSetMeshTriangulation3DFreudenthal::GetOwnedVerticesByGlobalId(
const vtkm::worklet::contourtree_augmented::mesh_dem::IdRelabeler& localToGlobalIdRelabeler,
IdArrayType& ownedVertices) const
{
return this->GetOwnedVerticesByGlobalIdImpl(this, localToGlobalIdRelabeler, ownedVertices);
}
} // namespace contourtree_augmented
} // worklet
} // vtkm

@ -105,6 +105,12 @@ public:
nullptr // optional input, included for consistency with ContourTreeMesh
) const;
/// Get the global indices of the vertices owned by this mesh. Implemented via
/// DataSetMesh.GetOwnedVerticesByGlobalIdImpl.
void GetOwnedVerticesByGlobalId(
const vtkm::worklet::contourtree_augmented::mesh_dem::IdRelabeler& localToGlobalIdRelabeler,
IdArrayType& ownedVertices) const;
private:
bool UseGetMax; // Define the behavior for the PrepareForExecution function
}; // class DataSetMesh_Triangulation
@ -206,6 +212,14 @@ inline void DataSetMeshTriangulation3DMarchingCubes::GetBoundaryVertices(
);
}
// Overwrite the implementation from the base DataSetMesh parent class
inline void DataSetMeshTriangulation3DMarchingCubes::GetOwnedVerticesByGlobalId(
const vtkm::worklet::contourtree_augmented::mesh_dem::IdRelabeler& localToGlobalIdRelabeler,
IdArrayType& ownedVertices) const
{
return this->GetOwnedVerticesByGlobalIdImpl(this, localToGlobalIdRelabeler, ownedVertices);
}
} // namespace contourtree_augmented
} // worklet
} // vtkm

@ -55,6 +55,7 @@
#include <vtkm/cont/ArrayHandle.h>
#include <vtkm/worklet/contourtree_augmented/ContourTree.h>
#include <vtkm/worklet/contourtree_augmented/Types.h>
#include <vtkm/worklet/contourtree_augmented/processcontourtree/PiecewiseLinearFunction.h>
#include <cmath>
@ -116,9 +117,9 @@ public:
private:
// Private default constructor to ensure that branch decomposition can only be created from a contour tree or loaded from storage (via static methods)
Branch()
: Extremum((vtkm::Id)NO_SUCH_ELEMENT)
: Extremum((vtkm::Id)vtkm::worklet::contourtree_augmented::NO_SUCH_ELEMENT)
, ExtremumVal(0)
, Saddle((vtkm::Id)NO_SUCH_ELEMENT)
, Saddle((vtkm::Id)vtkm::worklet::contourtree_augmented::NO_SUCH_ELEMENT)
, SaddleVal(0)
, Volume(0)
, Parent(nullptr)

@ -132,6 +132,7 @@ std::string BoundaryTree::Print()
resultStream << "Boundary-Restricted Augmented Contour Tree" << std::endl;
resultStream << "==========================================" << std::endl;
// fill it up
// We use regular ReadPortal here since we need access to all values anyway
auto superarcsPortal = this->Superarcs.ReadPortal();
auto vertexIndexPortal = this->VertexIndex.ReadPortal();
for (vtkm::Id node = 0; node < superarcsPortal.GetNumberOfValues(); node++)
@ -171,6 +172,7 @@ std::string BoundaryTree::PrintGlobalDot(const char* label,
blockOrigin, blockSize, globalSize);
// loop through all nodes
// We use regular ReadPortal here since we need access to most values anyway
auto vertexIndexPortal = this->VertexIndex.ReadPortal();
auto superarcsPortal = this->Superarcs.ReadPortal();
auto sortOrderPortal = mesh.SortOrder.ReadPortal();
@ -226,6 +228,7 @@ std::string BoundaryTree::PrintGlobalDot(
resultStream << "\tlabel=\"" << label << "\"\n\tlabelloc=t\n\tfontsize=30\n" << std::endl;
// loop through all nodes
// We use regular ReadPortal here since we need access to all values anyway
auto vertexIndexPortal = this->VertexIndex.ReadPortal();
auto globalMeshIndexPortal = mesh.GlobalMeshIndex.ReadPortal();
auto sortedValuesPortal = mesh.SortedValues.ReadPortal();

@ -454,18 +454,16 @@ void BoundaryTreeMaker<MeshType, MeshBoundaryExecObjType>::PropagateBoundaryCoun
#endif
// b. Iterate, propagating counts inwards
auto firstSupernodePerIterationReadPortal =
this->ContourTree.FirstSupernodePerIteration.ReadPortal();
auto firstHypernodePerIterationReadPortal =
this->ContourTree.FirstHypernodePerIteration.ReadPortal();
for (vtkm::Id iteration = 0; iteration < this->ContourTree.NumIterations; iteration++)
{ // b. per iteration
#ifdef DEBUG_PRINT
VTKM_LOG_S(vtkm::cont::LogLevel::Info, this->DebugPrint("Top of Loop:", __FILE__, __LINE__));
#endif
// i. Pull the array bounds into register
vtkm::Id firstSupernode = firstSupernodePerIterationReadPortal.Get(iteration);
vtkm::Id lastSupernode = firstSupernodePerIterationReadPortal.Get(iteration + 1);
vtkm::Id firstSupernode =
vtkm::cont::ArrayGetValue(iteration, this->ContourTree.FirstSupernodePerIteration);
vtkm::Id lastSupernode =
vtkm::cont::ArrayGetValue(iteration + 1, this->ContourTree.FirstSupernodePerIteration);
if (lastSupernode == firstSupernode)
{
@ -477,8 +475,10 @@ void BoundaryTreeMaker<MeshType, MeshBoundaryExecObjType>::PropagateBoundaryCoun
continue;
}
vtkm::Id firstHypernode = firstHypernodePerIterationReadPortal.Get(iteration);
vtkm::Id lastHypernode = firstHypernodePerIterationReadPortal.Get(iteration + 1);
vtkm::Id firstHypernode =
vtkm::cont::ArrayGetValue(iteration, this->ContourTree.FirstHypernodePerIteration);
vtkm::Id lastHypernode =
vtkm::cont::ArrayGetValue(iteration + 1, this->ContourTree.FirstHypernodePerIteration);
// ii. Add xfer + int & store in dependent count
// Compute the sum of this->SupernodeTransferBoundaryCount and this->SuperarcIntrinsicBoundaryCount
@ -639,13 +639,15 @@ void BoundaryTreeMaker<MeshType, MeshBoundaryExecObjType>::PropagateBoundaryCoun
// when we are done, we need to force the summation for the root node, JUST IN CASE it is a boundary node itself
// BTW, the value *SHOULD* be the number of boundary nodes, anyway
vtkm::Id rootSuperId = this->ContourTree.Supernodes.GetNumberOfValues() - 1;
this->SuperarcDependentBoundaryCount.WritePortal().Set(
vtkm::worklet::contourtree_augmented::IdArraySetValue(
rootSuperId,
this->SupernodeTransferBoundaryCount.ReadPortal().Get(rootSuperId) +
this->SuperarcIntrinsicBoundaryCount.ReadPortal().Get(rootSuperId));
this->HyperarcDependentBoundaryCount.WritePortal().Set(
vtkm::cont::ArrayGetValue(rootSuperId, this->SupernodeTransferBoundaryCount) +
vtkm::cont::ArrayGetValue(rootSuperId, this->SuperarcIntrinsicBoundaryCount),
this->SuperarcDependentBoundaryCount);
vtkm::worklet::contourtree_augmented::IdArraySetValue(
this->ContourTree.Hypernodes.GetNumberOfValues() - 1,
this->SuperarcDependentBoundaryCount.ReadPortal().Get(rootSuperId));
vtkm::cont::ArrayGetValue(rootSuperId, this->SuperarcDependentBoundaryCount),
this->HyperarcDependentBoundaryCount);
#ifdef DEBUG_PRINT
VTKM_LOG_S(vtkm::cont::LogLevel::Info,
@ -1137,7 +1139,8 @@ void BoundaryTreeMaker<MeshType, MeshBoundaryExecObjType>::CompressRegularisedNo
// first create the array: start by observing that the last entry is guaranteed
// to hold the total number of necessary vertices
this->NumKept = keptInBoundaryTree.ReadPortal().Get(keptInBoundaryTree.GetNumberOfValues() - 1);
this->NumKept =
vtkm::cont::ArrayGetValue(keptInBoundaryTree.GetNumberOfValues() - 1, keptInBoundaryTree);
// create an array to store the new superarc Ids and initialize it with NO_SUCH_ELEMENT
vtkm::worklet::contourtree_augmented::IdArrayType newSuperarc;
vtkm::cont::Algorithm::Copy(

@ -11,10 +11,15 @@
set(headers
BoundaryTree.h
BoundaryTreeMaker.h
CombineHyperSweepBlockFunctor.h
ComputeDistributedContourTreeFunctor.h
ContourTreeBlockData.h
DistributedContourTreeBlockData.h
HierarchicalAugmenter.h
HierarchicalAugmenterFunctor.h
HierarchicalContourTree.h
HierarchicalHyperSweeper.h
HyperSweepBlock.h
InteriorForest.h
MergeBlockFunctor.h
MultiBlockContourTreeHelper.h
@ -29,3 +34,5 @@ vtkm_declare_headers(${headers})
add_subdirectory(boundary_tree_maker)
add_subdirectory(tree_grafter)
add_subdirectory(hierarchical_contour_tree)
add_subdirectory(hierarchical_hyper_sweeper)
add_subdirectory(hierarchical_augmenter)

@ -0,0 +1,154 @@
//============================================================================
// Copyright (c) Kitware, Inc.
// All rights reserved.
// See LICENSE.txt for details.
//
// This software is distributed WITHOUT ANY WARRANTY; without even
// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the above copyright notice for more information.
//============================================================================
// Copyright (c) 2018, The Regents of the University of California, through
// Lawrence Berkeley National Laboratory (subject to receipt of any required approvals
// from the U.S. Dept. of Energy). All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// (1) Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// (2) Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// (3) Neither the name of the University of California, Lawrence Berkeley National
// Laboratory, U.S. Dept. of Energy nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
// IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
//
//=============================================================================
//
// This code is an extension of the algorithm presented in the paper:
// Parallel Peak Pruning for Scalable SMP Contour Tree Computation.
// Hamish Carr, Gunther Weber, Christopher Sewell, and James Ahrens.
// Proceedings of the IEEE Symposium on Large Data Analysis and Visualization
// (LDAV), October 2016, Baltimore, Maryland.
//
// The PPP2 algorithm and software were jointly developed by
// Hamish Carr (University of Leeds), Gunther H. Weber (LBNL), and
// Oliver Ruebel (LBNL)
//==============================================================================
#ifndef vtk_m_worklet_contourtree_distributed_combinehypersweepblockfunctor_h
#define vtk_m_worklet_contourtree_distributed_combinehypersweepblockfunctor_h
#include <vtkm/Types.h>
#include <vtkm/cont/ArrayGetValues.h>
#include <vtkm/cont/ArrayHandle.h>
#include <vtkm/worklet/contourtree_distributed/HyperSweepBlock.h>
// clang-format off
VTKM_THIRDPARTY_PRE_INCLUDE
#include <vtkm/thirdparty/diy/diy.h>
VTKM_THIRDPARTY_POST_INCLUDE
// clang-format on
// #define DEBUG_PRINT_COMBINED_BLOCK_IDS
namespace vtkm
{
namespace worklet
{
namespace contourtree_distributed
{
template <typename ContourTreeDataFieldType>
struct CobmineHyperSweepBlockFunctor
{
void operator()(
vtkm::worklet::contourtree_distributed::HyperSweepBlock<ContourTreeDataFieldType>* b,
const vtkmdiy::ReduceProxy& rp, // communication proxy
const vtkmdiy::RegularSwapPartners& // partners of the current block (unused)
) const
{
// Get our rank and DIY id
//const vtkm::Id rank = vtkm::cont::EnvironmentTracker::GetCommunicator().rank();
const auto selfid = rp.gid();
std::vector<int> incoming;
rp.incoming(incoming);
for (const int ingid : incoming)
{
auto roundNo = rp.round() - 1;
// NOTE/IMPORTANT: In each round we should have only one swap partner (despite for-loop here).
// If that assumption does not hold, it will break things.
// NOTE/IMPORTANT: This assumption only holds if the number of blocks is a power of two.
// Otherwise, we may need to process more than one incoming block
if (ingid != selfid)
{
#ifdef DEBUG_PRINT_COMBINED_BLOCK_IDS
int incomingGlobalBlockId;
rp.dequeue(ingid, incomingGlobalBlockId);
VTKM_LOG_S(vtkm::cont::LogLevel::Info,
"Combining local block " << b->GlobalBlockId << " with incomoing block "
<< incomingGlobalBlockId);
#endif
vtkm::cont::ArrayHandle<vtkm::Id> incomingIntrinsicVolume;
rp.dequeue(ingid, incomingIntrinsicVolume);
vtkm::cont::ArrayHandle<vtkm::Id> incomingDependentVolume;
rp.dequeue(ingid, incomingDependentVolume);
vtkm::Id numSupernodesToProcess = vtkm::cont::ArrayGetValue(
vtkm::Id{ 0 }, b->HierarchicalContourTree.FirstSupernodePerIteration[roundNo]);
auto intrinsicVolumeView =
make_ArrayHandleView(b->IntrinsicVolume, 0, numSupernodesToProcess);
auto incomingIntrinsicVolumeView =
make_ArrayHandleView(incomingIntrinsicVolume, 0, numSupernodesToProcess);
vtkm::cont::ArrayHandle<vtkm::Id> tempSum;
vtkm::cont::Algorithm::Transform(
intrinsicVolumeView, incomingIntrinsicVolumeView, tempSum, vtkm::Sum());
vtkm::cont::Algorithm::Copy(tempSum, intrinsicVolumeView);
auto dependentVolumeView =
make_ArrayHandleView(b->DependentVolume, 0, numSupernodesToProcess);
auto incomingDependentVolumeView =
make_ArrayHandleView(incomingDependentVolume, 0, numSupernodesToProcess);
vtkm::cont::Algorithm::Transform(
dependentVolumeView, incomingDependentVolumeView, tempSum, vtkm::Sum());
vtkm::cont::Algorithm::Copy(tempSum, dependentVolumeView);
}
}
for (int cc = 0; cc < rp.out_link().size(); ++cc)
{
auto target = rp.out_link().target(cc);
if (target.gid != selfid)
{
#ifdef DEBUG_PRINT_COMBINED_BLOCK_IDS
rp.enqueue(target, b->GlobalBlockId);
#endif
rp.enqueue(target, b->IntrinsicVolume);
rp.enqueue(target, b->DependentVolume);
}
}
}
};
} // namespace contourtree_distributed
} // namespace worklet
} // namespace vtkm
#endif
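// Illustrative standalone sketch (hypothetical data and function name, not the functor
// above): the element-wise accumulation pattern used in the fan-in, i.e., summing an
// incoming volume array into the first numSupernodesToProcess entries of the local
// array via ArrayHandleView + Transform + Copy.
#include <vector>
#include <vtkm/BinaryOperators.h>
#include <vtkm/cont/Algorithm.h>
#include <vtkm/cont/ArrayHandle.h>
#include <vtkm/cont/ArrayHandleView.h>
inline void AccumulateVolumesSketch()
{
  std::vector<vtkm::Id> localVec{ 1, 2, 3, 4 };
  std::vector<vtkm::Id> incomingVec{ 10, 20, 30, 40 };
  auto local = vtkm::cont::make_ArrayHandle(localVec, vtkm::CopyFlag::On);
  auto incoming = vtkm::cont::make_ArrayHandle(incomingVec, vtkm::CopyFlag::On);
  vtkm::Id numSupernodesToProcess = 2; // only the first two entries take part in this round
  auto localView = vtkm::cont::make_ArrayHandleView(local, 0, numSupernodesToProcess);
  auto incomingView = vtkm::cont::make_ArrayHandleView(incoming, 0, numSupernodesToProcess);
  vtkm::cont::ArrayHandle<vtkm::Id> tempSum;
  vtkm::cont::Algorithm::Transform(localView, incomingView, tempSum, vtkm::Sum());
  vtkm::cont::Algorithm::Copy(tempSum, localView); // local now holds { 11, 22, 3, 4 }
}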

@ -327,6 +327,7 @@ public:
<< std::endl
<< " Rank : " << rank << std::endl
<< " DIY Id : " << selfid << std::endl
<< " In Id : " << ingid << std::endl
<< " Round : " << rp.round() << std::endl
<< worklet.TimingsLogString);
// Log the contour tree size stats
@ -336,6 +337,7 @@ public:
<< std::endl
<< " Rank : " << rank << std::endl
<< " DIY Id : " << selfid << std::endl
<< " In Id : " << ingid << std::endl
<< " Round : " << rp.round() << std::endl
<< block->ContourTrees.back().PrintArraySizes());
@ -465,6 +467,11 @@ public:
rp.enqueue(target, block->BlockOrigin);
rp.enqueue(target, block->BlockSize);
rp.enqueue(target, block->ContourTreeMeshes.back());
VTKM_LOG_S(this->TreeLogLevel,
std::endl
<< "FanInEnqueue: Rank=" << rank << "; Round=" << rp.round()
<< "; DIY Send Id=" << selfid << "; DIY Target ID=" << target.gid
<< std::endl);
}
} // end for

@ -56,6 +56,8 @@
#include <vtkm/Types.h>
#include <vtkm/worklet/contourtree_augmented/Types.h>
#include <vtkm/worklet/contourtree_augmented/meshtypes/ContourTreeMesh.h>
#include <vtkm/worklet/contourtree_distributed/HierarchicalAugmenter.h>
#include <vtkm/worklet/contourtree_distributed/HierarchicalContourTree.h>
// clang-format off
VTKM_THIRDPARTY_PRE_INCLUDE
@ -73,24 +75,29 @@ namespace contourtree_distributed
template <typename FieldType>
struct DistributedContourTreeBlockData
{
/// Function required by DIY
static void* create() { return new DistributedContourTreeBlockData<FieldType>; }
// Block metadata
int GlobalBlockId; // Global DIY id of this block
vtkm::Id LocalBlockNo; // Local block id on this rank
vtkm::Id3 BlockOrigin; // Origin of the data block
vtkm::Id3 BlockSize;   // Extents of the data block
/// Function required by DIY
static void destroy(void* b)
{
delete static_cast<DistributedContourTreeBlockData<FieldType>*>(b);
}
// Block data
vtkm::Id BlockIndex;
// Fan in data
std::vector<vtkm::worklet::contourtree_augmented::ContourTree> ContourTrees;
std::vector<vtkm::worklet::contourtree_augmented::ContourTreeMesh<FieldType>> ContourTreeMeshes;
std::vector<vtkm::worklet::contourtree_distributed::InteriorForest> InteriorForests;
// Block metadata
vtkm::Id3 BlockOrigin; // Origin of the data block
vtkm::Id3 BlockSize;   // Extents of the data block
// Fan out data
vtkm::worklet::contourtree_distributed::HierarchicalContourTree<FieldType> HierarchicalTree;
// Augmentation phase
vtkm::worklet::contourtree_distributed::HierarchicalAugmenter<FieldType> HierarchicalAugmenter;
vtkm::worklet::contourtree_distributed::HierarchicalContourTree<FieldType> AugmentedTree;
// Destroy function allowing DIY to own blocks and clean them up after use
static void destroy(void* b)
{
delete static_cast<DistributedContourTreeBlockData<FieldType>*>(b);
}
};
} // namespace contourtree_distributed
} // namespace worklet
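// Illustrative sketch (hypothetical helper name; the exact vtkmdiy::Master arguments
// used by the distributed filter may differ): passing the create/destroy functions
// above to vtkmdiy::Master lets DIY own the blocks and delete them itself, so they are
// freed even if an exception unwinds the reduction.
template <typename FieldType>
void RegisterBlocksWithDiySketch(vtkmdiy::mpi::communicator& comm)
{
  vtkmdiy::Master master(
    comm,
    1,  // number of threads
    -1, // keep all blocks in memory
    vtkm::worklet::contourtree_distributed::DistributedContourTreeBlockData<FieldType>::create,
    vtkm::worklet::contourtree_distributed::DistributedContourTreeBlockData<FieldType>::destroy);
  (void)master; // blocks added via master.add(...) are owned and cleaned up by DIY
}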

File diff suppressed because it is too large

@ -0,0 +1,119 @@
//============================================================================
// Copyright (c) Kitware, Inc.
// All rights reserved.
// See LICENSE.txt for details.
//
// This software is distributed WITHOUT ANY WARRANTY; without even
// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the above copyright notice for more information.
//============================================================================
// Copyright (c) 2018, The Regents of the University of California, through
// Lawrence Berkeley National Laboratory (subject to receipt of any required approvals
// from the U.S. Dept. of Energy). All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// (1) Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// (2) Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// (3) Neither the name of the University of California, Lawrence Berkeley National
// Laboratory, U.S. Dept. of Energy nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
// IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
//
//=============================================================================
//
// This code is an extension of the algorithm presented in the paper:
// Parallel Peak Pruning for Scalable SMP Contour Tree Computation.
// Hamish Carr, Gunther Weber, Christopher Sewell, and James Ahrens.
// Proceedings of the IEEE Symposium on Large Data Analysis and Visualization
// (LDAV), October 2016, Baltimore, Maryland.
//
// The PPP2 algorithm and software were jointly developed by
// Hamish Carr (University of Leeds), Gunther H. Weber (LBNL), and
// Oliver Ruebel (LBNL)
//==============================================================================
#ifndef vtk_m_worklet_contourtree_distributed_hierarchicalaugmenterfunctor_h
#define vtk_m_worklet_contourtree_distributed_hierarchicalaugmenterfunctor_h
#include <vtkm/Types.h>
#include <vtkm/worklet/contourtree_augmented/Types.h>
#include <vtkm/worklet/contourtree_distributed/DistributedContourTreeBlockData.h>
#include <vtkm/worklet/contourtree_distributed/PrintGraph.h>
// clang-format off
VTKM_THIRDPARTY_PRE_INCLUDE
#include <vtkm/thirdparty/diy/diy.h>
VTKM_THIRDPARTY_POST_INCLUDE
// clang-format on
namespace vtkm
{
namespace worklet
{
namespace contourtree_distributed
{
/// Functor used by DIY reduce to merge the data blocks in parallel
template <typename FieldType>
class HierarchicalAugmenterFunctor
{
public:
void operator()(
vtkm::worklet::contourtree_distributed::DistributedContourTreeBlockData<FieldType>*
blockData, // local Block.
const vtkmdiy::ReduceProxy& rp, // communication proxy
const vtkmdiy::RegularSwapPartners& // partners of the current block (unused)
) const
{
auto round = rp.round();
const auto selfid = rp.gid();
for (int i = 0; i < rp.in_link().size(); ++i)
{
int ingid = rp.in_link().target(i).gid;
if (ingid != selfid)
{ // Receive and augment
rp.dequeue(ingid, blockData->HierarchicalAugmenter.InData);
blockData->HierarchicalAugmenter.RetrieveInAttachmentPoints();
}
}
for (int i = 0; i < rp.out_link().size(); ++i)
{
auto target = rp.out_link().target(i);
if (target.gid != selfid)
{ // Send to partner
blockData->HierarchicalAugmenter.PrepareOutAttachmentPoints(round);
// TODO/FIXME: Correct function? Correct round?
rp.enqueue(target, blockData->HierarchicalAugmenter.OutData);
// TODO/FIXME: Is it safe to release HierarchicalAugmenter.OutData (and InData) already here? Could we free the arrays before the other block has had the chance to complete its rp.dequeue?
blockData->HierarchicalAugmenter.ReleaseSwapArrays();
}
}
}
};
} // namespace contourtree_distributed
} // namespace worklet
} // namespace vtkm
#endif

@ -68,7 +68,10 @@
#ifndef vtk_m_worklet_contourtree_distributed_hierarchical_contour_tree_h
#define vtk_m_worklet_contourtree_distributed_hierarchical_contour_tree_h
#define VOLUME_PRINT_WIDTH 8
#include <vtkm/Types.h>
#include <vtkm/worklet/contourtree_augmented/ContourTree.h>
#include <vtkm/worklet/contourtree_augmented/Types.h>
#include <vtkm/worklet/contourtree_augmented/meshtypes/ContourTreeMesh.h>
#include <vtkm/worklet/contourtree_distributed/hierarchical_contour_tree/FindRegularByGlobal.h>
@ -141,6 +144,9 @@ public:
// how many rounds of fan-in were used to construct it
vtkm::Id NumRounds;
// use for debugging? -> This makes more sense in hyper sweeper?
// vtkm::Id NumOwnedRegularVertices;
// The following arrays store the numbers of reg/super/hyper nodes at each level of the hierarchy
// They are filled in from the top down, and are fundamentally CPU side control variables
// They will be needed for hypersweeps.
@ -160,7 +166,7 @@ public:
/// routine to create a FindRegularByGlobal object that we can use as an input for worklets to call the function
VTKM_CONT
FindRegularByGlobal GetFindRegularByGlobal()
FindRegularByGlobal GetFindRegularByGlobal() const
{
return FindRegularByGlobal(this->RegularNodeSortOrder, this->RegularNodeGlobalIds);
}
@ -216,7 +222,17 @@ public:
/// debug routine
VTKM_CONT
std::string DebugPrint(const char* message, const char* fileName, long lineNum) const;
std::string DebugPrint(std::string message, const char* fileName, long lineNum) const;
// modified version of dumpSuper() that also gives volume counts
VTKM_CONT
static std::string DumpVolumes(
const vtkm::worklet::contourtree_augmented::IdArrayType& supernodes,
const vtkm::worklet::contourtree_augmented::IdArrayType& superarcs,
const vtkm::worklet::contourtree_augmented::IdArrayType& regularNodeGlobalIds,
vtkm::Id totalVolume,
const vtkm::worklet::contourtree_augmented::IdArrayType& intrinsicVolume,
const vtkm::worklet::contourtree_augmented::IdArrayType& dependentVolume);
private:
/// Used internally to Invoke worklets
@ -225,6 +241,7 @@ private:
template <typename FieldType>
HierarchicalContourTree<FieldType>::HierarchicalContourTree()
//: NumOwnedRegularVertices(static_cast<vtkm::Id>(0))
{ // constructor
NumRegularNodesInRound.ReleaseResources();
NumSupernodesInRound.ReleaseResources();
@ -247,24 +264,31 @@ void HierarchicalContourTree<FieldType>::Initialize(
auto tempZeroArray = vtkm::cont::ArrayHandleConstant<vtkm::Id>(0, this->NumRounds + 1);
vtkm::cont::Algorithm::Copy(tempZeroArray, this->NumIterations);
vtkm::cont::Algorithm::Copy(tempZeroArray, this->NumRegularNodesInRound);
this->NumRegularNodesInRound.WritePortal().Set(this->NumRounds, tree.Nodes.GetNumberOfValues());
vtkm::worklet::contourtree_augmented::IdArraySetValue(
this->NumRounds, tree.Nodes.GetNumberOfValues(), this->NumRegularNodesInRound);
vtkm::cont::Algorithm::Copy(tempZeroArray, this->NumSupernodesInRound);
this->NumSupernodesInRound.WritePortal().Set(this->NumRounds,
tree.Supernodes.GetNumberOfValues());
vtkm::worklet::contourtree_augmented::IdArraySetValue(
this->NumRounds, tree.Supernodes.GetNumberOfValues(), this->NumSupernodesInRound);
vtkm::cont::Algorithm::Copy(tempZeroArray, this->NumHypernodesInRound);
this->NumHypernodesInRound.WritePortal().Set(this->NumRounds,
tree.Hypernodes.GetNumberOfValues());
vtkm::worklet::contourtree_augmented::IdArraySetValue(
this->NumRounds, tree.Hypernodes.GetNumberOfValues(), this->NumHypernodesInRound);
}
// copy the iterations of the top level hypersweep - this is +1 because we are counting inclusively
// HAC JAN 15, 2020: In order to make this consistent with grafting rounds for hybrid hypersweeps, we add one to the logical number of
// iterations, instead of the prior version which stored an additional extra element (i.e., +2)
// WARNING! WARNING! WARNING! This is a departure from the treatment in the contour tree, where the last iteration to the NULL root was
// treated as an implicit round.
{
vtkm::Id tempSizeVal = vtkm::cont::ArrayGetValue(this->NumRounds, this->NumIterations) + 1;
vtkm::worklet::contourtree_augmented::IdArraySetValue(
this->NumRounds, tree.NumIterations + 1, this->NumIterations);
this->FirstSupernodePerIteration.resize(static_cast<std::size_t>(this->NumRounds + 1));
this->FirstSupernodePerIteration[static_cast<std::size_t>(this->NumRounds)].Allocate(
tempSizeVal);
this->FirstHypernodePerIteration.resize(static_cast<std::size_t>(this->NumRounds + 1));
this->FirstHypernodePerIteration[static_cast<std::size_t>(this->NumRounds)].Allocate(
tempSizeVal);
}
// copy the iterations of the top level hypersweep - this is +2: one because we are counting inclusively, the second because we need an
// extra one at the end to compute sizes with deltas
this->NumIterations.WritePortal().Set(this->NumRounds, tree.NumIterations);
this->FirstSupernodePerIteration.resize(static_cast<std::size_t>(this->NumRounds + 1));
this->FirstSupernodePerIteration[static_cast<std::size_t>(this->NumRounds)].Allocate(
this->NumIterations.ReadPortal().Get(this->NumRounds) + 2);
this->FirstHypernodePerIteration.resize(static_cast<std::size_t>(this->NumRounds + 1));
this->FirstHypernodePerIteration[static_cast<std::size_t>(this->NumRounds)].Allocate(
this->NumIterations.ReadPortal().Get(this->NumRounds) + 2);
// now copy in the details. Use CopySubRange to ensure that the Copy does not shrink the size
// of the arrays, as they are in this case already allocated above to the appropriate size
vtkm::cont::Algorithm::CopySubRange(
@ -371,18 +395,18 @@ std::string HierarchicalContourTree<FieldType>::RegularString(const vtkm::Id reg
{
resultStream << "Regular ID: ";
vtkm::worklet::contourtree_augmented::PrintIndexType(regularId, resultStream);
resultStream << " Value: " << this->DataValues.ReadPortal().Get(regularId);
resultStream << " Value: " << vtkm::cont::ArrayGetValue(regularId, this->DataValues);
resultStream << " Global ID: ";
vtkm::worklet::contourtree_augmented::PrintIndexType(
this->RegularNodeGlobalIds.ReadPortal().Get(regularId), resultStream);
vtkm::cont::ArrayGetValue(regularId, this->RegularNodeGlobalIds), resultStream);
resultStream << " Regular ID: ";
vtkm::worklet::contourtree_augmented::PrintIndexType(regularId, resultStream);
resultStream << " SNode ID: ";
vtkm::worklet::contourtree_augmented::PrintIndexType(
this->Regular2Supernode.ReadPortal().Get(regularId), resultStream);
vtkm::cont::ArrayGetValue(regularId, this->Regular2Supernode), resultStream);
resultStream << "Superparents: ";
vtkm::worklet::contourtree_augmented::PrintIndexType(
this->Superparents.ReadPortal().Get(regularId));
vtkm::cont::ArrayGetValue(regularId, this->Superparents));
}
return resultStream.str();
} // RegularString()
@ -400,43 +424,32 @@ std::string HierarchicalContourTree<FieldType>::SuperString(const vtkm::Id super
}
else
{
vtkm::Id unmaskedSuperId = vtkm::worklet::contourtree_augmented::MaskedIndex(superId);
vtkm::Id tempSupernodeOfSuperId = vtkm::cont::ArrayGetValue(unmaskedSuperId, this->Supernodes);
resultStream << "Super ID: ";
vtkm::worklet::contourtree_augmented::PrintIndexType(superId, resultStream);
resultStream << " Value: "
<< this->DataValues.ReadPortal().Get(this->Supernodes.ReadPortal().Get(
vtkm::worklet::contourtree_augmented::MaskedIndex(superId)));
<< vtkm::cont::ArrayGetValue(tempSupernodeOfSuperId, this->DataValues);
resultStream << " Global ID: ";
vtkm::worklet::contourtree_augmented::PrintIndexType(
this->RegularNodeGlobalIds.ReadPortal().Get(this->Supernodes.ReadPortal().Get(
vtkm::worklet::contourtree_augmented::MaskedIndex(superId))),
resultStream);
vtkm::cont::ArrayGetValue(tempSupernodeOfSuperId, this->RegularNodeGlobalIds), resultStream);
resultStream << " Regular Id: ";
vtkm::worklet::contourtree_augmented::PrintIndexType(
this->Supernodes.ReadPortal().Get(vtkm::worklet::contourtree_augmented::MaskedIndex(superId)),
resultStream);
vtkm::worklet::contourtree_augmented::PrintIndexType(tempSupernodeOfSuperId, resultStream);
resultStream << " Superarc: ";
vtkm::worklet::contourtree_augmented::PrintIndexType(
this->Superarcs.ReadPortal().Get(vtkm::worklet::contourtree_augmented::MaskedIndex(superId)),
resultStream);
vtkm::cont::ArrayGetValue(unmaskedSuperId, this->Superarcs), resultStream);
resultStream << " HNode ID: ";
vtkm::worklet::contourtree_augmented::PrintIndexType(
this->Super2Hypernode.ReadPortal().Get(
vtkm::worklet::contourtree_augmented::MaskedIndex(superId)),
resultStream);
vtkm::cont::ArrayGetValue(unmaskedSuperId, this->Super2Hypernode), resultStream);
resultStream << " Hyperparent: ";
vtkm::worklet::contourtree_augmented::PrintIndexType(
this->Hyperparents.ReadPortal().Get(
vtkm::worklet::contourtree_augmented::MaskedIndex(superId)),
resultStream);
vtkm::cont::ArrayGetValue(unmaskedSuperId, this->Hyperparents), resultStream);
resultStream << " Round: ";
vtkm::worklet::contourtree_augmented::PrintIndexType(
this->WhichRound.ReadPortal().Get(vtkm::worklet::contourtree_augmented::MaskedIndex(superId)),
resultStream);
vtkm::cont::ArrayGetValue(unmaskedSuperId, this->WhichRound), resultStream);
resultStream << " Iteration: ";
vtkm::worklet::contourtree_augmented::PrintIndexType(
this->WhichIteration.ReadPortal().Get(
vtkm::worklet::contourtree_augmented::MaskedIndex(superId)),
resultStream);
vtkm::cont::ArrayGetValue(unmaskedSuperId, this->WhichIteration), resultStream);
}
return resultStream.str();
} // SuperString()
@ -454,34 +467,24 @@ std::string HierarchicalContourTree<FieldType>::HyperString(const vtkm::Id hyper
}
else
{
vtkm::Id unmaskedHyperId = vtkm::worklet::contourtree_augmented::MaskedIndex(hyperId);
vtkm::Id hypernodeOfHyperId = vtkm::cont::ArrayGetValue(unmaskedHyperId, this->Hypernodes);
vtkm::Id supernodeOfHyperId = vtkm::cont::ArrayGetValue(hypernodeOfHyperId, this->Supernodes);
resultStream << "Hyper Id: ";
vtkm::worklet::contourtree_augmented::PrintIndexType(hyperId, resultStream);
resultStream << " Value: "
<< this->DataValues.ReadPortal().Get(
this->Supernodes.ReadPortal().Get(this->Hypernodes.ReadPortal().Get(
vtkm::worklet::contourtree_augmented::MaskedIndex(hyperId))));
resultStream << " Value: " << vtkm::cont::ArrayGetValue(supernodeOfHyperId, this->DataValues);
resultStream << " Global ID: ";
vtkm::worklet::contourtree_augmented::PrintIndexType(
this->RegularNodeGlobalIds.ReadPortal().Get(
this->Supernodes.ReadPortal().Get(this->Hypernodes.ReadPortal().Get(
vtkm::worklet::contourtree_augmented::MaskedIndex(hyperId)))),
resultStream);
vtkm::cont::ArrayGetValue(supernodeOfHyperId, this->RegularNodeGlobalIds), resultStream);
resultStream << " Regular ID: ";
vtkm::worklet::contourtree_augmented::PrintIndexType(
this->Supernodes.ReadPortal().Get(this->Hypernodes.ReadPortal().Get(
vtkm::worklet::contourtree_augmented::MaskedIndex(hyperId))),
resultStream);
vtkm::worklet::contourtree_augmented::PrintIndexType(supernodeOfHyperId, resultStream);
resultStream << " Super ID: ";
vtkm::worklet::contourtree_augmented::PrintIndexType(
this->Hypernodes.ReadPortal().Get(vtkm::worklet::contourtree_augmented::MaskedIndex(hyperId)),
resultStream);
vtkm::worklet::contourtree_augmented::PrintIndexType(hypernodeOfHyperId, resultStream);
resultStream << " Hyperarc: ";
vtkm::worklet::contourtree_augmented::PrintIndexType(
this->Hyperarcs.ReadPortal().Get(vtkm::worklet::contourtree_augmented::MaskedIndex(hyperId)),
resultStream);
vtkm::cont::ArrayGetValue(unmaskedHyperId, this->Hyperarcs), resultStream);
resultStream << " Superchildren: "
<< this->Superchildren.ReadPortal().Get(
vtkm::worklet::contourtree_augmented::MaskedIndex(hyperId));
<< vtkm::cont::ArrayGetValue(unmaskedHyperId, this->Superchildren);
}
return resultStream.str();
} // HyperString()
@ -496,11 +499,11 @@ std::string HierarchicalContourTree<FieldType>::ProbeHyperPath(const vtkm::Id re
resultStream << "Node: " << this->RegularString(regularId) << std::endl;
// find the superparent
vtkm::Id superparent = this->Superparents.ReadPortal().Get(regularId);
vtkm::Id superparent = vtkm::cont::ArrayGetValue(regularId, this->Superparents);
resultStream << "Superparent: " << SuperString(superparent) << std::endl;
// and the hyperparent
vtkm::Id hyperparent = this->Hyperparents.ReadPortal().Get(superparent);
vtkm::Id hyperparent = vtkm::cont::ArrayGetValue(superparent, this->Hyperparents);
// now trace the path inwards: terminate on last round when we have null hyperarc
vtkm::Id length = 0;
@ -514,7 +517,7 @@ std::string HierarchicalContourTree<FieldType>::ProbeHyperPath(const vtkm::Id re
resultStream << "Hyperparent: " << this->HyperString(hyperparent) << std::endl;
// retrieve the target of the hyperarc
vtkm::Id hypertarget = this->Hyperarcs.ReadPortal().Get(hyperparent);
vtkm::Id hypertarget = vtkm::cont::ArrayGetValue(hyperparent, this->Hyperarcs);
resultStream << "Hypertarget: "
<< SuperString(vtkm::worklet::contourtree_augmented::MaskedIndex(hypertarget))
@ -531,11 +534,11 @@ std::string HierarchicalContourTree<FieldType>::ProbeHyperPath(const vtkm::Id re
} // root or attachment point
else
{ // ordinary supernode
hyperparent = this->Hyperparents.ReadPortal().Get(maskedHypertarget);
hyperparent = vtkm::cont::ArrayGetValue(maskedHypertarget, this->Hyperparents);
} // ordinary supernode
// now take the new superparent's hyperparent/hypertarget
hypertarget = this->Hyperarcs.ReadPortal().Get(hyperparent);
hypertarget = vtkm::cont::ArrayGetValue(hyperparent, this->Hyperarcs);
} // loop inwards
resultStream << "Probe Complete" << std::endl << std::endl;
@ -550,9 +553,7 @@ std::string HierarchicalContourTree<FieldType>::ProbeSuperPath(const vtkm::Id re
{
std::stringstream resultStream;
// find the superparent
vtkm::Id superparent = this->Superparents.ReadPortal().Get(regularId);
auto superarcsPortal = this->Superarcs.ReadPortal();
auto whichRoundPortal = this->WhichRound.ReadPortal();
vtkm::Id superparent = vtkm::cont::ArrayGetValue(regularId, this->Superparents);
// now trace the path inwards: terminate on last round when we have null hyperarc
vtkm::Id length = 0;
while (true)
@ -563,7 +564,7 @@ std::string HierarchicalContourTree<FieldType>::ProbeSuperPath(const vtkm::Id re
break;
}
// retrieve the target of the superarc
vtkm::Id supertarget = superarcsPortal.Get(superparent);
vtkm::Id supertarget = vtkm::cont::ArrayGetValue(superparent, this->Superarcs);
resultStream << "Superparent: " << this->SuperString(superparent) << std::endl;
resultStream << "Supertarget: "
@ -574,7 +575,7 @@ std::string HierarchicalContourTree<FieldType>::ProbeSuperPath(const vtkm::Id re
// mask the supertarget
vtkm::Id maskedSupertarget = vtkm::worklet::contourtree_augmented::MaskedIndex(supertarget);
// and retrieve it's supertarget
vtkm::Id nextSupertarget = superarcsPortal.Get(maskedSupertarget);
vtkm::Id nextSupertarget = vtkm::cont::ArrayGetValue(maskedSupertarget, this->Superarcs);
vtkm::Id maskedNextSupertarget =
vtkm::worklet::contourtree_augmented::MaskedIndex(nextSupertarget);
resultStream << "Next target: " << this->SuperString(nextSupertarget) << std::endl;
@ -583,7 +584,7 @@ std::string HierarchicalContourTree<FieldType>::ProbeSuperPath(const vtkm::Id re
if (vtkm::worklet::contourtree_augmented::NoSuchElement(nextSupertarget))
{ // root or attachment point
// test round: if it's the last one, only the root has a null edge
if (whichRoundPortal.Get(maskedNextSupertarget) == this->NumRounds)
if (vtkm::cont::ArrayGetValue(maskedNextSupertarget, this->WhichRound) == this->NumRounds)
// we're done
break;
else // attachment point
@ -629,6 +630,7 @@ std::string HierarchicalContourTree<FieldType>::PrintDotSuperStructure(const cha
outstream << "\t// Supernodes\n";
// loop through all supernodes
// We use regular ReadPortals here since this requires access to many values anyways
auto supernodesPortal = this->Supernodes.ReadPortal();
auto hypernodesPortal = this->Hypernodes.ReadPortal();
auto hyperparentsPortal = this->Hyperparents.ReadPortal();
@ -754,7 +756,7 @@ std::string HierarchicalContourTree<FieldType>::PrintDotSuperStructure(const cha
/// debug routine
template <typename FieldType>
std::string HierarchicalContourTree<FieldType>::DebugPrint(const char* message,
std::string HierarchicalContourTree<FieldType>::DebugPrint(std::string message,
const char* fileName,
long lineNum) const
{ // DebugPrint
@ -814,6 +816,7 @@ std::string HierarchicalContourTree<FieldType>::DebugPrint(const char* message,
"nSupernodes In Round", this->NumSupernodesInRound, -1, resultStream);
vtkm::worklet::contourtree_augmented::PrintIndices(
"nHypernodes In Round", this->NumHypernodesInRound, -1, resultStream);
//resultStream << "Owned Regular Vertices: " << this->NumOwnedRegularVertices << std::endl;
vtkm::worklet::contourtree_augmented::PrintHeader(this->NumIterations.GetNumberOfValues(),
resultStream);
vtkm::worklet::contourtree_augmented::PrintIndices(
@ -858,6 +861,84 @@ std::string HierarchicalContourTree<FieldType>::PrintTreeStats() const
} // PrintTreeStats
// modified version of dumpSuper() that also gives volume counts
template <typename FieldType>
std::string HierarchicalContourTree<FieldType>::DumpVolumes(
const vtkm::worklet::contourtree_augmented::IdArrayType& supernodes,
const vtkm::worklet::contourtree_augmented::IdArrayType& superarcs,
const vtkm::worklet::contourtree_augmented::IdArrayType& regularNodeGlobalIds,
vtkm::Id totalVolume,
const vtkm::worklet::contourtree_augmented::IdArrayType& intrinsicVolume,
const vtkm::worklet::contourtree_augmented::IdArrayType& dependentVolume)
{ // DumpVolumes()
// a local string stream to build the output
std::stringstream outStream;
// header info
outStream << "============" << std::endl;
outStream << "Contour Tree" << std::endl;
// loop through all superarcs.
// We use regular ReadPortals here since this requires access to many values anyways
auto supernodesPortal = supernodes.ReadPortal();
auto regularNodeGlobalIdsPortal = regularNodeGlobalIds.ReadPortal();
auto superarcsPortal = superarcs.ReadPortal();
auto intrinsicVolumePortal = intrinsicVolume.ReadPortal();
auto dependentVolumePortal = dependentVolume.ReadPortal();
for (vtkm::Id supernode = 0; supernode < supernodes.GetNumberOfValues(); supernode++)
{ // per supernode
// convert all the way down to global regular IDs
vtkm::Id fromRegular = supernodesPortal.Get(supernode);
vtkm::Id fromGlobal = regularNodeGlobalIdsPortal.Get(fromRegular);
// retrieve the superarc target
vtkm::Id toSuper = superarcsPortal.Get(supernode);
// if it is NO_SUCH_ELEMENT, it is the root or an attachment point
// for an augmented tree, it can only be the root
// in any event, we don't want to print them
if (vtkm::worklet::contourtree_augmented::NoSuchElement(toSuper))
{
continue;
}
// now break out the ascending flag & the underlying ID
bool superarcAscends = vtkm::worklet::contourtree_augmented::IsAscending(toSuper);
toSuper = vtkm::worklet::contourtree_augmented::MaskedIndex(toSuper);
vtkm::Id toRegular = supernodesPortal.Get(toSuper);
vtkm::Id toGlobal = regularNodeGlobalIdsPortal.Get(toRegular);
// compute the weights
vtkm::Id weight = dependentVolumePortal.Get(supernode);
// -1 because the validation output does not count the supernode for the superarc
vtkm::Id arcWeight = intrinsicVolumePortal.Get(supernode) - 1;
vtkm::Id counterWeight = totalVolume - weight + arcWeight;
// orient with high end first
if (superarcAscends)
{ // ascending superarc
outStream << "H: " << std::setw(VOLUME_PRINT_WIDTH) << toGlobal;
outStream << " L: " << std::setw(VOLUME_PRINT_WIDTH) << fromGlobal;
outStream << " VH: " << std::setw(VOLUME_PRINT_WIDTH) << weight;
outStream << " VR: " << std::setw(VOLUME_PRINT_WIDTH) << arcWeight;
outStream << " VL: " << std::setw(VOLUME_PRINT_WIDTH) << counterWeight;
outStream << std::endl;
} // ascending superarc
else
{ // descending superarc
outStream << "H: " << std::setw(VOLUME_PRINT_WIDTH) << fromGlobal;
outStream << " L: " << std::setw(VOLUME_PRINT_WIDTH) << toGlobal;
outStream << " VH: " << std::setw(VOLUME_PRINT_WIDTH) << counterWeight;
outStream << " VR: " << std::setw(VOLUME_PRINT_WIDTH) << arcWeight;
outStream << " VL: " << std::setw(VOLUME_PRINT_WIDTH) << weight;
outStream << std::endl;
} // descending superarc
} // per supernode
// return the string
return outStream.str();
} // DumpVolumes()
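The weight bookkeeping above is easier to follow with concrete numbers. The figures below are made up; only the arithmetic mirrors DumpVolumes().

totalVolume   = 1000                                  (regular vertices in the whole mesh)
weight        = dependentVolume[supernode]        = 240
arcWeight     = intrinsicVolume[supernode] - 1    = 12 - 1 = 11
counterWeight = totalVolume - weight + arcWeight  = 1000 - 240 + 11 = 771

Note that weight + counterWeight == totalVolume + arcWeight, i.e. the regular vertices interior to the superarc contribute to the volume reported at both ends of the arc.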
} // namespace contourtree_distributed
} // namespace worklet
} // namespace vtkm

@ -0,0 +1,712 @@
//============================================================================
// Copyright (c) Kitware, Inc.
// All rights reserved.
// See LICENSE.txt for details.
//
// This software is distributed WITHOUT ANY WARRANTY; without even
// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the above copyright notice for more information.
//============================================================================
// Copyright (c) 2018, The Regents of the University of California, through
// Lawrence Berkeley National Laboratory (subject to receipt of any required approvals
// from the U.S. Dept. of Energy). All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// (1) Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// (2) Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// (3) Neither the name of the University of California, Lawrence Berkeley National
// Laboratory, U.S. Dept. of Energy nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
// IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
//
//=======================================================================================
//
// Parallel Peak Pruning v. 2.0
//
// Started June 15, 2017
//
// Copyright Hamish Carr, University of Leeds
//
// HierarchicalHyperSweeper.h
//
//=======================================================================================
//
// COMMENTS:
//
// This class encapsulates a hypersweep over the hierarchical contour tree. It is a
// separate class primarily to keep the post-processing separate from the main tree
// construction, but it should also make it easier to generalise to arbitrary computations
//
// Basically, the way that this operates is:
// 1. First, we do a local (standard) hypersweep over the hierarchical tree
// 2. We then fan-in one round at a time. In each round,
// a. We trade the prefix of the array with our logical partner, then
// b. Combine the array prefix with our own
//
// Tactically, when we do MPI, we can either embed it in this unit, or leave it in the
// calling unit. For ease of porting, we will leave all MPI in the calling unit, so
// this unit only needs to do the combination.
//
// Note that we could define an operator to be passed in, and probably want to template it
// that way in the future, but for now, we'll do the first version directly with addition
//
// By assumption, we need a commutative property, since we do not guarantee that we have
// strict ordering along superarcs (which would require sharing a supernode sort with our
// partner, if not globally)
//
//=======================================================================================
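Because all MPI stays in the calling unit, the per-round combine described above boils down to an element-wise, commutative merge of the partner's array prefix with our own. A standalone sketch of that idea in plain C++ follows; the names and the use of std::vector are illustrative only and are not part of the class API.

#include <algorithm>
#include <functional>
#include <vector>

// Add the prefix received from the logical partner into our own dependent values.
// Commutativity of + means it does not matter which rank's values come first.
// Assumes partnerPrefix.size() <= myDependent.size().
void CombinePartnerPrefix(std::vector<long long>& myDependent,
                          const std::vector<long long>& partnerPrefix)
{
  std::transform(partnerPrefix.begin(), partnerPrefix.end(), // first input: partner prefix
                 myDependent.begin(),                        // second input: our values
                 myDependent.begin(),                        // output: update in place
                 std::plus<long long>());
}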
#ifndef vtk_m_worklet_contourtree_distributed_hierarchical_hyper_sweeper_h
#define vtk_m_worklet_contourtree_distributed_hierarchical_hyper_sweeper_h
#include <iomanip>
#include <string>
#include <vtkm/worklet/contourtree_augmented/PrintVectors.h>
#include <vtkm/worklet/contourtree_augmented/Types.h>
#include <vtkm/worklet/contourtree_augmented/data_set_mesh/IdRelabeler.h>
#include <vtkm/worklet/contourtree_distributed/HierarchicalContourTree.h>
#include <vtkm/worklet/contourtree_distributed/PrintGraph.h>
#include <vtkm/worklet/contourtree_distributed/hierarchical_hyper_sweeper/ComputeSuperarcDependentWeightsWorklet.h>
#include <vtkm/worklet/contourtree_distributed/hierarchical_hyper_sweeper/ComputeSuperarcTransferWeightsWorklet.h>
#include <vtkm/worklet/contourtree_distributed/hierarchical_hyper_sweeper/InitializeIntrinsicVertexCountComputeSuperparentIdsWorklet.h>
#include <vtkm/worklet/contourtree_distributed/hierarchical_hyper_sweeper/InitializeIntrinsicVertexCountInitalizeCountsWorklet.h>
#include <vtkm/worklet/contourtree_distributed/hierarchical_hyper_sweeper/InitializeIntrinsicVertexCountSubtractLowEndWorklet.h>
#include <vtkm/worklet/contourtree_distributed/hierarchical_hyper_sweeper/TransferTargetComperator.h>
#include <vtkm/worklet/contourtree_distributed/hierarchical_hyper_sweeper/TransferWeightsUpdateLHEWorklet.h>
#include <vtkm/worklet/contourtree_distributed/hierarchical_hyper_sweeper/TransferWeightsUpdateRHEWorklet.h>
namespace vtkm
{
namespace worklet
{
namespace contourtree_distributed
{
// the class itself
template <typename SweepValueType, typename ContourTreeFieldType>
class HierarchicalHyperSweeper
{ // class HierarchicalHyperSweeper
public:
// the tree that it hypersweeps over
const HierarchicalContourTree<ContourTreeFieldType>& HierarchicalTree;
// the Id of the base block (used for debug output)
vtkm::Id BlockId;
// array of values being operated over (same size as supernode set)
// keep both intrinsic & dependent values
// the intrinsic values are just stored here but not modified
const vtkm::cont::ArrayHandle<SweepValueType>& IntrinsicValues;
// the dependent values are what is being swept over and updated here
const vtkm::cont::ArrayHandle<SweepValueType>& DependentValues;
// and to avoid an extra log summation, store the number of logical nodes for the underlying block
// (computed when initializing the regular vertex list)
vtkm::Id NumOwnedRegularVertices;
// these are working arrays, lifted up here for ease of debug code
// Subranges of these arrays will be reused in the rounds / iterations rather than being reallocated
// an array for temporary storage of the prefix sums
vtkm::cont::ArrayHandle<SweepValueType> ValuePrefixSum;
// two arrays for collecting targets of transfers
vtkm::worklet::contourtree_augmented::IdArrayType TransferTarget;
vtkm::worklet::contourtree_augmented::IdArrayType SortedTransferTarget;
// an array for indirect sorting of sets of superarcs
vtkm::worklet::contourtree_augmented::IdArrayType SuperSortPermute;
/// Constructor
/// @param[in] blockId The Id of the base block (used for debug output)
/// @param[in] hierarchicalTree the tree that the hypersweep is performed over
/// @param[in] intrinsicValues array of intrinsic values; these are only stored here, not modified
/// @param[in] dependentValues array of values being operated over (same size as supernode set)
HierarchicalHyperSweeper<SweepValueType, ContourTreeFieldType>(
vtkm::Id blockId,
const HierarchicalContourTree<ContourTreeFieldType>& hierarchicalTree,
const vtkm::cont::ArrayHandle<SweepValueType>& intrinsicValues,
const vtkm::cont::ArrayHandle<SweepValueType>& dependentValues);
/// Our routines to initialize the sweep need to be static (or external) if we are going to use the constructor
/// to run the actual hypersweep
/// @param[in] hierarchicalTree the tree that the hypersweep is performed over
/// @param[in] baseBlock the underlying mesh base block to initialize from
/// @param[in] localToGlobalIdRelabeler Id relabeler used to compute global indices from local mesh indices
/// @param[out] superarcRegularCounts array for the output superarc regular counts
template <typename MeshType>
void InitializeIntrinsicVertexCount(
const HierarchicalContourTree<ContourTreeFieldType>& hierarchicalTree,
const MeshType& baseBlock,
const vtkm::worklet::contourtree_augmented::mesh_dem::IdRelabeler& localToGlobalIdRelabeler,
vtkm::worklet::contourtree_augmented::IdArrayType& superarcRegularCounts);
/// routine to do the local hypersweep using addition / subtraction
/// The function uses the ComputeSuperarcDependentWeights, ComputeSuperarcTransferWeights,
/// and TransferWeights functions to carry out the local hyper sweep
void LocalHyperSweep();
/// Debug routine to print the contents of the HierarchicalHyperSweeper
/// @param[in] message Message to print along the debug output
/// @param[in] fileName Name of the file the message is printed from. Usually set to __FILE__
/// @param[in] lineNum Line number in the file where the message is printed from. Usually set to __LINE__
std::string DebugPrint(std::string message, const char* fileName, long lineNum) const;
/// Routine to save the HierarchicalContourTree of this HierarchicalHyperSweeper to a Dot file
/// @param[in] message Message included in the file
/// @param[in] outFileName The name of the file to write the tree to
void SaveHierarchicalContourTreeDot(std::string message, const char* outFileName) const;
protected:
// Functions used internally by LocalHyperSweep to compute the local hyper sweep
/// Routine to compute the correct weights dependent on each superarc in a subrange (defined by the round & iteration)
void ComputeSuperarcDependentWeights(vtkm::Id round,
vtkm::Id iteration,
vtkm::Id firstSupernode,
vtkm::Id lastSupernode);
/// routine to compute the weights to transfer to superarcs (defined by the round & iteration)
void ComputeSuperarcTransferWeights(vtkm::Id round,
vtkm::Id iteration,
vtkm::Id firstSupernode,
vtkm::Id lastSupernode);
/// routine to transfer the weights
void TransferWeights(vtkm::Id round,
vtkm::Id iteration,
vtkm::Id firstSupernode,
vtkm::Id lastSupernode);
private:
/// Used internally to Invoke worklets
vtkm::cont::Invoker Invoke;
}; // class HierarchicalHyperSweeper
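For orientation, a plausible call sequence for this class, based only on the interface declared above, is sketched here; tree, mesh, relabeler, intrinsic, dependent and blockId are placeholders assumed to have been set up by the caller (e.g. by the distributed contour tree filter).

using SweeperType =
  vtkm::worklet::contourtree_distributed::HierarchicalHyperSweeper<vtkm::Id, vtkm::Float64>;

vtkm::worklet::contourtree_augmented::IdArrayType superarcRegularCounts;
SweeperType hyperSweeper(blockId, tree, intrinsic, dependent);
// fill the intrinsic counts from the base mesh block ...
hyperSweeper.InitializeIntrinsicVertexCount(tree, mesh, relabeler, superarcRegularCounts);
// ... then run the local, per-block sweep; the inter-block fan-in remains with the caller
hyperSweeper.LocalHyperSweep();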
template <typename SweepValueType, typename ContourTreeFieldType>
HierarchicalHyperSweeper<SweepValueType, ContourTreeFieldType>::HierarchicalHyperSweeper(
vtkm::Id blockId,
const HierarchicalContourTree<ContourTreeFieldType>& hierarchicalTree,
const vtkm::cont::ArrayHandle<SweepValueType>& intrinsicValues,
const vtkm::cont::ArrayHandle<SweepValueType>& dependentValues)
: HierarchicalTree(hierarchicalTree)
, BlockId(blockId)
, IntrinsicValues(intrinsicValues)
, DependentValues(dependentValues)
, NumOwnedRegularVertices(vtkm::Id{ 0 })
{ // constructor
// Initialize arrays with 0s
vtkm::cont::Algorithm::Fill(
this->ValuePrefixSum, vtkm::Id{ 0 }, this->HierarchicalTree.Supernodes.GetNumberOfValues());
vtkm::cont::Algorithm::Fill(
this->TransferTarget, vtkm::Id{ 0 }, this->HierarchicalTree.Supernodes.GetNumberOfValues());
vtkm::cont::Algorithm::Fill(this->SortedTransferTarget,
vtkm::Id{ 0 },
this->HierarchicalTree.Supernodes.GetNumberOfValues());
// Initialize the supersortPermute to the identity
vtkm::cont::ArrayHandleIndex tempIndexArray(
this->HierarchicalTree.Supernodes.GetNumberOfValues());
vtkm::cont::Algorithm::Copy(tempIndexArray, this->SuperSortPermute);
} // constructor
// static function used to compute the initial superarc regular counts
template <typename SweepValueType, typename ContourTreeFieldType>
template <typename MeshType>
void HierarchicalHyperSweeper<SweepValueType, ContourTreeFieldType>::InitializeIntrinsicVertexCount(
const HierarchicalContourTree<ContourTreeFieldType>& hierarchicalTree,
const MeshType& baseBlock,
const vtkm::worklet::contourtree_augmented::mesh_dem::IdRelabeler& localToGlobalIdRelabeler,
vtkm::worklet::contourtree_augmented::IdArrayType& superarcRegularCounts)
{ // InitializeIntrinsicVertexCount()
// I. Call the mesh to get a list of all regular vertices belonging to the block by global Id
vtkm::worklet::contourtree_augmented::IdArrayType globalIds;
// TODO/FIXME: Even though the virtual function on DataSetMesh was removed in commit
// 93730495813f7b85e59d4a5dae2076977787fd78, this should call the correct function
// since MeshType is templated and should have the appropriate type. Verify that
// this is indeed correct.
baseBlock.GetOwnedVerticesByGlobalId(localToGlobalIdRelabeler, globalIds);
// and store the size for later reference
//hierarchicalTree.NumOwnedRegularVertices = globalIds.GetNumberOfValues();
this->NumOwnedRegularVertices = globalIds.GetNumberOfValues();
#ifdef DEBUG_PRINT
{
std::stringstream debugStream;
debugStream << std::endl << "Owned Regular Vertex List" << std::endl;
vtkm::worklet::contourtree_augmented::PrintHeader(globalIds.GetNumberOfValues(), debugStream);
vtkm::worklet::contourtree_augmented::PrintIndices("GlobalId", globalIds, -1, debugStream);
VTKM_LOG_S(vtkm::cont::LogLevel::Info, debugStream.str());
}
#endif
// II. Look up the global Ids in the hierarchical tree & convert to superparent Ids
vtkm::worklet::contourtree_augmented::IdArrayType superparents;
{ // scope to make sure temporary variables are deleted
auto findRegularByGlobal = hierarchicalTree.GetFindRegularByGlobal();
auto computeSuperparentIdsWorklet = vtkm::worklet::contourtree_distributed::
hierarchical_hyper_sweeper::InitializeIntrinsicVertexCountComputeSuperparentIdsWorklet();
Invoke(computeSuperparentIdsWorklet, // worklet to run
globalIds, // input
findRegularByGlobal, // input
hierarchicalTree.Regular2Supernode, // input
hierarchicalTree.Superparents, // input
superparents // output
);
}
#ifdef DEBUG_PRINT
{
std::stringstream debugStream;
vtkm::worklet::contourtree_augmented::PrintIndices(
"Superparents", superparents, -1, debugStream);
VTKM_LOG_S(vtkm::cont::LogLevel::Info, debugStream.str());
}
#endif
// III. Sort the superparent Ids & count the copies of each
vtkm::cont::Algorithm::Sort(superparents);
#ifdef DEBUG_PRINT
{
std::stringstream debugStream;
vtkm::worklet::contourtree_augmented::PrintIndices("Sorted SP", superparents, -1, debugStream);
VTKM_LOG_S(vtkm::cont::LogLevel::Info, debugStream.str());
}
#endif
// initialize the counts to zero.
vtkm::cont::Algorithm::Fill(
superarcRegularCounts, vtkm::Id{ 0 }, this->HierarchicalTree.Supernodes.GetNumberOfValues());
// set the count to the Id one off the high end of each range
Invoke(vtkm::worklet::contourtree_distributed::hierarchical_hyper_sweeper::
InitializeIntrinsicVertexCountInitalizeCountsWorklet{},
superparents, // input domain
superarcRegularCounts // output
);
// now repeat to subtract out the low end
Invoke(vtkm::worklet::contourtree_distributed::hierarchical_hyper_sweeper::
InitializeIntrinsicVertexCountSubtractLowEndWorklet{},
superparents, // input domain
superarcRegularCounts // output
);
// and that is that
#ifdef DEBUG_PRINT
{
std::stringstream debugStream;
vtkm::worklet::contourtree_augmented::PrintIndices(
"SuperarcRegularCounts", superarcRegularCounts, -1, debugStream);
VTKM_LOG_S(vtkm::cont::LogLevel::Info, debugStream.str());
}
#endif
} // InitializeIntrinsicVertexCount()
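The sort-then-segment counting in step III above (write the index one past the high end of each run of equal superparents, then subtract the index of the low end) has a simple serial analogue. The following standalone sketch uses made-up superparent Ids and is only meant to illustrate the counting idea the two worklets carry out in parallel.

#include <algorithm>
#include <iostream>
#include <vector>

int main()
{
  // Superparent Ids of the owned regular vertices (already looked up); values are made up.
  std::vector<int> superparents{ 2, 0, 2, 1, 2, 0 };
  std::vector<long long> counts(3, 0); // one slot per superarc

  // Sort so that equal superparents form contiguous runs.
  std::sort(superparents.begin(), superparents.end()); // 0 0 1 2 2 2

  // Upper boundary of each run: write the index one past the run's high end ...
  for (std::size_t i = 0; i < superparents.size(); ++i)
    if (i + 1 == superparents.size() || superparents[i] != superparents[i + 1])
      counts[superparents[i]] = static_cast<long long>(i + 1);

  // ... then subtract the index of the run's low end, leaving the run length.
  for (std::size_t i = 0; i < superparents.size(); ++i)
    if (i == 0 || superparents[i] != superparents[i - 1])
      counts[superparents[i]] -= static_cast<long long>(i);

  for (std::size_t s = 0; s < counts.size(); ++s)
    std::cout << "superarc " << s << ": " << counts[s] << " regular vertices\n";
  // Expected output: 2, 1, 3
  return 0;
}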
// routine to do the local hypersweep using addition / subtraction
template <typename SweepValueType, typename ContourTreeFieldType>
void HierarchicalHyperSweeper<SweepValueType, ContourTreeFieldType>::LocalHyperSweep()
{ // LocalHyperSweep()
#ifdef DEBUG_PRINT
VTKM_LOG_S(vtkm::cont::LogLevel::Info,
DebugPrint(std::string("Hypersweep Block ") + std::to_string(BlockId) +
std::string(" Starting Local HyperSweep"),
__FILE__,
__LINE__));
#endif
// I. Iterate over all rounds of the hyperstructure
for (vtkm::Id round = 0; round <= this->HierarchicalTree.NumRounds; round++)
{ // per round
#ifdef DEBUG_PRINT
VTKM_LOG_S(vtkm::cont::LogLevel::Info,
DebugPrint(std::string("Hypersweep Block ") + std::to_string(BlockId) +
std::string(" Round ") + std::to_string(round) +
std::string(" Step 0 Starting Round"),
__FILE__,
__LINE__));
#endif
// A. Iterate over all iterations of the round
auto numIterationsPortal =
this->HierarchicalTree.NumIterations
.ReadPortal(); // TODO/FIXME: Use portal? Or something more efficient?
for (vtkm::Id iteration = 0; iteration < numIterationsPortal.Get(round); iteration++)
{ // per iteration
#ifdef DEBUG_PRINT
VTKM_LOG_S(vtkm::cont::LogLevel::Info,
DebugPrint(std::string("Hypersweep Block ") + std::to_string(BlockId) +
std::string(" Round ") + std::to_string(round) +
std::string(" Step 1 Iteration ") + std::to_string(iteration) +
std::string(" Step A Starting Iteration"),
__FILE__,
__LINE__));
#endif
// 1. Establish the range of supernode Ids that we want to process
// TODO/FIXME: Use portal? Or is there a more efficient way?
auto firstSupernodePerIterationPortal =
this->HierarchicalTree.FirstSupernodePerIteration[round].ReadPortal();
vtkm::Id firstSupernode = firstSupernodePerIterationPortal.Get(iteration);
vtkm::Id lastSupernode = firstSupernodePerIterationPortal.Get(iteration + 1);
// call the routine that computes the dependent weights for each superarc in that range
this->ComputeSuperarcDependentWeights(round, iteration, firstSupernode, lastSupernode);
#ifdef DEBUG_PRINT
VTKM_LOG_S(vtkm::cont::LogLevel::Info,
DebugPrint(std::string("Hypersweep Block ") + std::to_string(BlockId) +
std::string(" Round ") + std::to_string(round) +
std::string(" Step 1 Iteration ") + std::to_string(iteration) +
std::string(" Step B Dependent Weights Computed"),
__FILE__,
__LINE__));
#endif
// now call the routine that computes the weights to be transferred and the superarcs to which they transfer
this->ComputeSuperarcTransferWeights(round, iteration, firstSupernode, lastSupernode);
#ifdef DEBUG_PRINT
VTKM_LOG_S(vtkm::cont::LogLevel::Info,
DebugPrint(std::string("Hypersweep Block ") + std::to_string(BlockId) +
std::string(" Round ") + std::to_string(round) +
std::string(" Step 1 Iteration ") + std::to_string(iteration) +
std::string(" Step C Transfer Weights Computed"),
__FILE__,
__LINE__));
#endif
// transfer the weights
this->TransferWeights(round, iteration, firstSupernode, lastSupernode);
#ifdef DEBUG_PRINT
VTKM_LOG_S(vtkm::cont::LogLevel::Info,
DebugPrint(std::string("Hypersweep Block ") + std::to_string(BlockId) +
std::string(" Round ") + std::to_string(round) +
std::string(" Step 1 Iteration ") + std::to_string(iteration) +
std::string(" Step D Weights Transferred"),
__FILE__,
__LINE__));
#endif
} // per iteration
#ifdef DEBUG_PRINT
VTKM_LOG_S(vtkm::cont::LogLevel::Info,
DebugPrint(std::string("Hypersweep Block ") + std::to_string(BlockId) +
std::string(" Round ") + std::to_string(round) +
std::string(" Step 2 Ending Round"),
__FILE__,
__LINE__));
#endif
} // per round
} // LocalHyperSweep()
// routine to compute the correct weights dependent on each superarc in a subrange (defined by the round & iteration)
template <typename SweepValueType, typename ContourTreeFieldType>
void HierarchicalHyperSweeper<SweepValueType, ContourTreeFieldType>::
ComputeSuperarcDependentWeights(
vtkm::Id round,
vtkm::Id, // iteration, // Kept parameter in case we need it for debugging.
vtkm::Id firstSupernode,
vtkm::Id lastSupernode)
{ // ComputeSuperarcDependentWeights()
vtkm::Id numSupernodesToProcess = lastSupernode - firstSupernode;
// 2. Use sorted prefix sum to compute the total weight to contribute to the super/hypertarget
// Same as std::partial_sum(sweepValues.begin() + firstSupernode, sweepValues.begin() + lastSupernode, valuePrefixSum.begin() + firstSupernode);
{
// DependentValues[firstSuperNode, lastSupernode)
vtkm::cont::ArrayHandleView<vtkm::worklet::contourtree_augmented::IdArrayType>
dependentValuesView(this->DependentValues, // subset DependentValues
firstSupernode, // start at firstSupernode
numSupernodesToProcess); // until lastSupernode (not included)
// Target array
vtkm::cont::ArrayHandleView<vtkm::worklet::contourtree_augmented::IdArrayType>
valuePrefixSumView(this->ValuePrefixSum, // subset ValuePrefixSum
firstSupernode, // start at firstSupernode
numSupernodesToProcess); // until lastSupernode (not included)
// Compute the partial sum for DependentValues[firstSuperNode, lastSupernode) and write to ValuePrefixSum[firstSuperNode, lastSupernode)
vtkm::cont::Algorithm::ScanInclusive(dependentValuesView, // input
valuePrefixSumView); // result of partial sum
}
// Since the prefix sum is over *all* supernodes in the iteration, we need to break it into segments
// There are two cases we have to worry about:
// a. Hyperarcs made up of multiple supernodes
// b. Attachment points (which don't have a corresponding hyperarc)
// and they can be mixed in any given iteration
// Since we have the prefix sum in a separate array, we avoid read/write conflicts
// 3. Compute the segmented weights from the prefix sum array
{
// Create views of the subranges of the arrays we need to update
vtkm::cont::ArrayHandleCounting<vtkm::Id> supernodeIndex(
firstSupernode, vtkm::Id{ 1 }, numSupernodesToProcess);
vtkm::cont::ArrayHandleView<vtkm::worklet::contourtree_augmented::IdArrayType>
hierarchicalTreeSuperarcsView(
this->HierarchicalTree.Superarcs, firstSupernode, numSupernodesToProcess);
vtkm::cont::ArrayHandleView<vtkm::worklet::contourtree_augmented::IdArrayType>
hierarchicalTreeHyperparentsView(
this->HierarchicalTree.Hyperparents, firstSupernode, numSupernodesToProcess);
vtkm::cont::ArrayHandleView<vtkm::worklet::contourtree_augmented::IdArrayType>
hierarchicalTreeHypernodesView(
this->HierarchicalTree.Hypernodes, firstSupernode, numSupernodesToProcess);
vtkm::cont::ArrayHandleView<vtkm::worklet::contourtree_augmented::IdArrayType>
dependentValuesView(this->DependentValues, firstSupernode, numSupernodesToProcess);
// create the worklet
vtkm::worklet::contourtree_distributed::hierarchical_hyper_sweeper::
ComputeSuperarcDependentWeightsWorklet<SweepValueType>
computeSuperarcDependentWeightsWorklet(
firstSupernode, round, this->HierarchicalTree.NumRounds);
// Execute the worklet
this->Invoke(
computeSuperarcDependentWeightsWorklet, // the worklet
supernodeIndex, // input counting index [firstSupernode, lastSupernode)
hierarchicalTreeSuperarcsView, // input view of hierarchicalTree.Superarcs[firstSupernode, lastSupernode)
hierarchicalTreeHyperparentsView, // input view of hierarchicalTree.Hyperparents[firstSupernode, lastSupernode)
this->HierarchicalTree.Hypernodes, // input full hierarchicalTree.Hypernodes array
this->ValuePrefixSum, // input full ValuePrefixSum array
dependentValuesView // output view of DependentValues[firstSupernode, lastSupernode)
);
}
} // ComputeSuperarcDependentWeights()
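The reason a single running prefix sum over the whole iteration suffices (steps 2 and 3 above) is the usual segment identity: the total over any contiguous range of supernodes can be read off as the difference of two prefix-sum entries, so the per-hyperarc segmentation never has to re-read the inputs. A standalone illustration with made-up values:

#include <iostream>
#include <numeric>
#include <vector>

int main()
{
  // Dependent values of the supernodes processed in one iteration (made-up numbers).
  std::vector<long long> dependent{ 3, 1, 4, 1, 5, 9 };
  std::vector<long long> prefix(dependent.size());
  std::partial_sum(dependent.begin(), dependent.end(), prefix.begin()); // 3 4 8 9 14 23

  // Sum of the segment [first, last] without re-reading the inputs.
  auto segmentSum = [&](std::size_t first, std::size_t last) {
    return prefix[last] - (first == 0 ? 0 : prefix[first - 1]);
  };
  std::cout << segmentSum(2, 4) << std::endl; // 4 + 1 + 5 = 10
  return 0;
}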
// routine to compute the weights to transfer to superarcs (defined by the round & iteration)
template <typename SweepValueType, typename ContourTreeFieldType>
void HierarchicalHyperSweeper<SweepValueType, ContourTreeFieldType>::ComputeSuperarcTransferWeights(
vtkm::Id round,
vtkm::Id, // iteration, // Kept parameter in case we need it for debugging.
vtkm::Id firstSupernode,
vtkm::Id lastSupernode)
{ // ComputeSuperarcTransferWeights()
// At this stage, we would otherwise transfer weights by hyperarc, but attachment points don't *have* hyperarcs
// so we will do a transfer by superarc instead, making sure that we only transfer from the last superarc in each
// hyperarc, plus for any attachment point
vtkm::Id numSupernodesToProcess = lastSupernode - firstSupernode;
// 4. Set the amount each superarc wants to transfer, reusing the valuePrefixSum array for the purpose
// and the transfer target
{ // scope ComputeSuperarcTransferWeightsWorklet to make sure temp variables are cleared
// Create ArrayHandleViews of the subrange of values that we need to update
vtkm::cont::ArrayHandleCounting<vtkm::Id> supernodeIndex(
firstSupernode, vtkm::Id{ 1 }, numSupernodesToProcess);
vtkm::cont::ArrayHandleView<vtkm::worklet::contourtree_augmented::IdArrayType>
hierarchicalTreeSupernodesView(
this->HierarchicalTree.Supernodes, firstSupernode, numSupernodesToProcess);
vtkm::cont::ArrayHandleView<vtkm::worklet::contourtree_augmented::IdArrayType>
hierarchicalTreeSuperarcsView(
this->HierarchicalTree.Superarcs, firstSupernode, numSupernodesToProcess);
vtkm::cont::ArrayHandleView<vtkm::worklet::contourtree_augmented::IdArrayType>
transferTargetView(this->TransferTarget, firstSupernode, numSupernodesToProcess);
// instantiate the worklet
vtkm::worklet::contourtree_distributed::hierarchical_hyper_sweeper::
ComputeSuperarcTransferWeightsWorklet computeSuperarcTransferWeightsWorklet(
round, this->HierarchicalTree.NumRounds, lastSupernode);
// call the worklet
this->Invoke(
computeSuperarcTransferWeightsWorklet, // worklet
supernodeIndex, // input counting array [firstSupernode, lastSupernode)
hierarchicalTreeSupernodesView, // input view of hierarchicalTree.supernodes[firstSupernode, lastSupernode)
this->HierarchicalTree.Superparents, // input whole array of hierarchicalTree.superparents
this->HierarchicalTree.Hyperparents, // input whole array of hierarchicalTree.hyperparents
hierarchicalTreeSuperarcsView, // input/output view of hierarchicalTree.superarcs[firstSupernode, lastSupernode)
transferTargetView // input view of transferTarget[firstSupernode, lastSupernode)
);
} // scope ComputeSuperarcTransferWeightsWorklet
// 5. Now we need to sort the transfer targets into contiguous segments
{
// create view of superSortPermute[firstSupernode, lastSupernode) for sorting
vtkm::cont::ArrayHandleView<vtkm::worklet::contourtree_augmented::IdArrayType>
superSortPermuteView(this->SuperSortPermute, firstSupernode, numSupernodesToProcess);
// create the comparator for the sort
vtkm::worklet::contourtree_distributed::hierarchical_hyper_sweeper::TransferTargetComperator
transferTargetComperator(this->TransferTarget);
// sort the subrange of our array
vtkm::cont::Algorithm::Sort(superSortPermuteView, transferTargetComperator);
}
// 6. The [first,last] subrange is now permuted, so we can copy the transfer targets and weights into arrays
// The following code block implements the following for loop using fancy array handles and copy
// for (vtkm::Id supernode = firstSupernode; supernode < lastSupernode; supernode++)
// {
// sortedTransferTarget[supernode] = transferTarget[superSortPermute[supernode]];
// valuePrefixSum[supernode] = sweepValues[superSortPermute[supernode]];
// }
{
// copy transfer target in the sorted order
vtkm::cont::ArrayHandleView<vtkm::worklet::contourtree_augmented::IdArrayType>
sortedTransferTargetView(this->SortedTransferTarget, firstSupernode, numSupernodesToProcess);
vtkm::cont::ArrayHandleView<vtkm::worklet::contourtree_augmented::IdArrayType>
superSortPermuteView(this->SuperSortPermute, firstSupernode, numSupernodesToProcess);
auto permutedTransferTarget =
vtkm::cont::make_ArrayHandlePermutation(superSortPermuteView, // idArray
this->TransferTarget); // valueArray
vtkm::cont::Algorithm::Copy(permutedTransferTarget, sortedTransferTargetView);
// Note that any values associated with NO_SUCH_ELEMENT will be ignored
// copy transfer weight in the sorted order
vtkm::cont::ArrayHandleView<vtkm::worklet::contourtree_augmented::IdArrayType>
valuePrefixSumView(this->ValuePrefixSum, firstSupernode, numSupernodesToProcess);
auto permutedDependentValues =
vtkm::cont::make_ArrayHandlePermutation(superSortPermuteView, // idArray
this->DependentValues); // valueArray
vtkm::cont::Algorithm::Copy(permutedDependentValues, valuePrefixSumView);
}
} // ComputeSuperarcTransferWeights()
// routine to transfer the weights
template <typename SweepValueType, typename ContourTreeFieldType>
void HierarchicalHyperSweeper<SweepValueType, ContourTreeFieldType>::TransferWeights(
vtkm::Id, // round, // Kept parameters in case we need it for debugging.
vtkm::Id, // iteration, // Kept parameters in case we need it for debugging.
vtkm::Id firstSupernode,
vtkm::Id lastSupernode)
{ // TransferWeights()
// 7. Now perform a segmented prefix sum
vtkm::Id numSupernodesToProcess = lastSupernode - firstSupernode;
// Same as std::partial_sum(valuePrefixSum.begin() + firstSupernode, valuePrefixSum.begin() + lastSupernode, valuePrefixSum.begin() + firstSupernode);
{
// ValuePrefixSum[firstSuperNode, lastSupernode)
vtkm::cont::ArrayHandleView<vtkm::worklet::contourtree_augmented::IdArrayType>
valuePrefixSumView(this->ValuePrefixSum, // subset ValuePrefixSum
firstSupernode, // start at firstSupernode
numSupernodesToProcess); // until lastSupernode (not included)
// TODO: If it is safe to use the same array as input and output for ScanInclusive then this code should be updated to avoid the extra copy
// In this case our target array is the same as our source array. For safety we
// store the values of our prefix sum in a temporary array and then copy the values
// back into our valuePrefixSumView at the end
vtkm::worklet::contourtree_augmented::IdArrayType tempScanInclusiveTarget;
tempScanInclusiveTarget.Allocate(numSupernodesToProcess);
// Compute the partial sum for DependentValues[firstSuperNode, lastSupernode) and write to ValuePrefixSum[firstSuperNode, lastSupernode)
vtkm::cont::Algorithm::ScanInclusive(valuePrefixSumView, // input
tempScanInclusiveTarget); // result of partial sum
// Now copy the values from our prefix sum back
vtkm::cont::Algorithm::Copy(tempScanInclusiveTarget, valuePrefixSumView);
}
// 7a. and 7b.
{
// 7a. Find the RHE of each group and transfer the prefix sum weight
// Note that we do not compute the transfer weight separately, we add it in place instead
// Instantiate the worklet
auto supernodeIndex =
vtkm::cont::make_ArrayHandleCounting(firstSupernode, vtkm::Id{ 1 }, numSupernodesToProcess);
VTKM_ASSERT(firstSupernode + numSupernodesToProcess <=
this->ValuePrefixSum.GetNumberOfValues());
auto valuePrefixSumView = vtkm::cont::make_ArrayHandleView(
this->ValuePrefixSum, firstSupernode, numSupernodesToProcess);
vtkm::worklet::contourtree_distributed::hierarchical_hyper_sweeper::
TransferWeightsUpdateRHEWorklet transferWeightsUpdateRHEWorklet(lastSupernode);
// Invoke the worklet
this->Invoke(transferWeightsUpdateRHEWorklet, // worklet
supernodeIndex, // input counting array [firstSupernode, lastSupernode)
this->SortedTransferTarget,
valuePrefixSumView, // input view of valuePrefixSum[firstSupernode, lastSupernode)
this->DependentValues);
}
{
VTKM_ASSERT(firstSupernode + 1 + numSupernodesToProcess - 1 <=
this->SortedTransferTarget.GetNumberOfValues());
auto sortedTransferTargetView = vtkm::cont::make_ArrayHandleView(
this->SortedTransferTarget, firstSupernode + 1, numSupernodesToProcess - 1);
VTKM_ASSERT(firstSupernode + 1 + numSupernodesToProcess - 1 <=
this->SortedTransferTarget.GetNumberOfValues());
auto sortedTransferTargetShiftedView = vtkm::cont::make_ArrayHandleView(
this->SortedTransferTarget, firstSupernode, numSupernodesToProcess - 1);
auto valuePrefixSumPreviousValueView = vtkm::cont::make_ArrayHandleView(
this->ValuePrefixSum, firstSupernode, numSupernodesToProcess - 1);
// 7b. Now find the LHE of each group and subtract out the prior weight
vtkm::worklet::contourtree_distributed::hierarchical_hyper_sweeper::
TransferWeightsUpdateLHEWorklet transferWeightsUpdateLHEWorklet;
this->Invoke(transferWeightsUpdateLHEWorklet,
sortedTransferTargetView,
sortedTransferTargetShiftedView,
valuePrefixSumPreviousValueView,
this->DependentValues);
}
} // TransferWeights()
// debug routine
template <typename SweepValueType, typename ContourTreeFieldType>
std::string HierarchicalHyperSweeper<SweepValueType, ContourTreeFieldType>::DebugPrint(
std::string message,
const char* fileName,
long lineNum) const
{ // DebugPrint()
std::stringstream resultStream;
resultStream << std::endl;
resultStream << "----------------------------------------" << std::endl;
resultStream << std::setw(30) << std::left << fileName << ":" << std::right << std::setw(4)
<< lineNum << std::endl;
resultStream << std::left << message << std::endl;
resultStream << "Hypersweep Value Array Contains: " << std::endl;
resultStream << "----------------------------------------" << std::endl;
resultStream << std::endl;
vtkm::worklet::contourtree_augmented::PrintHeader(this->DependentValues.GetNumberOfValues(),
resultStream);
vtkm::worklet::contourtree_augmented::PrintIndices(
"Intrinsic", this->IntrinsicValues, -1, resultStream);
vtkm::worklet::contourtree_augmented::PrintIndices(
"Dependent", this->DependentValues, -1, resultStream);
vtkm::worklet::contourtree_augmented::PrintIndices(
"Prefix Sum", this->ValuePrefixSum, -1, resultStream);
vtkm::worklet::contourtree_augmented::PrintIndices(
"Transfer To", this->TransferTarget, -1, resultStream);
vtkm::worklet::contourtree_augmented::PrintIndices(
"Sorted Transfer", this->SortedTransferTarget, -1, resultStream);
vtkm::worklet::contourtree_augmented::PrintIndices(
"Sort Permute", this->SuperSortPermute, -1, resultStream);
return resultStream.str();
} // DebugPrint()
// Routine to save the hierarchical tree to file
template <typename SweepValueType, typename ContourTreeFieldType>
void HierarchicalHyperSweeper<SweepValueType, ContourTreeFieldType>::SaveHierarchicalContourTreeDot(
std::string message,
const char* outFileName) const
{ // SaveHierarchicalContourTreeDot()
std::string hierarchicalTreeDotString =
HierarchicalContourTreeDotGraphPrint<vtkm::worklet::contourtree_augmented::IdArrayType>(
message,
this->HierarchicalTree,
SHOW_SUPER_STRUCTURE | SHOW_HYPER_STRUCTURE | SHOW_ALL_IDS | SHOW_ALL_SUPERIDS |
SHOW_ALL_HYPERIDS | SHOW_EXTRA_DATA, //|GV_NODE_NAME_USES_GLOBAL_ID
this->BlockId,
this->DependentValues);
std::ofstream hierarchicalTreeFile(outFileName);
hierarchicalTreeFile << hierarchicalTreeDotString;
} // SaveHierarchicalContourTreeDot
} // namespace contourtree_distributed
} // namespace worklet
} // namespace vtkm
#endif

@ -0,0 +1,113 @@
//============================================================================
// Copyright (c) Kitware, Inc.
// All rights reserved.
// See LICENSE.txt for details.
//
// This software is distributed WITHOUT ANY WARRANTY; without even
// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the above copyright notice for more information.
//============================================================================
// Copyright (c) 2018, The Regents of the University of California, through
// Lawrence Berkeley National Laboratory (subject to receipt of any required approvals
// from the U.S. Dept. of Energy). All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// (1) Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// (2) Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// (3) Neither the name of the University of California, Lawrence Berkeley National
// Laboratory, U.S. Dept. of Energy nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
// IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
//
//=============================================================================
//
// This code is an extension of the algorithm presented in the paper:
// Parallel Peak Pruning for Scalable SMP Contour Tree Computation.
// Hamish Carr, Gunther Weber, Christopher Sewell, and James Ahrens.
// Proceedings of the IEEE Symposium on Large Data Analysis and Visualization
// (LDAV), October 2016, Baltimore, Maryland.
//
// The PPP2 algorithm and software were jointly developed by
// Hamish Carr (University of Leeds), Gunther H. Weber (LBNL), and
// Oliver Ruebel (LBNL)
//==============================================================================
#ifndef vtk_m_worklet_contourtree_distributed_hypersweepblock_h
#define vtk_m_worklet_contourtree_distributed_hypersweepblock_h
#include <vtkm/Types.h>
#include <vtkm/cont/ArrayHandle.h>
#include <vtkm/worklet/contourtree_distributed/HierarchicalContourTree.h>
namespace vtkm
{
namespace worklet
{
namespace contourtree_distributed
{
template <typename ContourTreeDataFieldType>
struct HyperSweepBlock
{
HyperSweepBlock(
const vtkm::Id& localBlockNo,
const int globalBlockId,
const vtkm::Id3& origin,
const vtkm::Id3& size,
const vtkm::Id3& globalSize,
const vtkm::worklet::contourtree_distributed::HierarchicalContourTree<ContourTreeDataFieldType>&
hierarchicalContourTree)
: LocalBlockNo(localBlockNo)
, GlobalBlockId(globalBlockId)
, Origin(origin)
, Size(size)
, GlobalSize(globalSize)
, HierarchicalContourTree(hierarchicalContourTree)
{
}
// Mesh information
vtkm::Id LocalBlockNo;
int GlobalBlockId;
vtkm::Id3 Origin;
vtkm::Id3 Size;
vtkm::Id3 GlobalSize;
// Hierarchical contour tree for this block
const vtkm::worklet::contourtree_distributed::HierarchicalContourTree<ContourTreeDataFieldType>&
HierarchicalContourTree;
// Computed values
vtkm::cont::ArrayHandle<vtkm::Id> IntrinsicVolume;
vtkm::cont::ArrayHandle<vtkm::Id> DependentVolume;
// Destroy function allowing DIY to own blocks and clean them up after use
static void destroy(void* b)
{
delete static_cast<HyperSweepBlock<ContourTreeDataFieldType>*>(b);
}
};
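A minimal sketch of how such a block might be created and later released through destroy(); the geometry values are placeholders and tree is assumed to be an existing HierarchicalContourTree<vtkm::Float64> owned by the caller (normally DIY owns the block and invokes destroy for us).

using BlockType = vtkm::worklet::contourtree_distributed::HyperSweepBlock<vtkm::Float64>;

auto* block = new BlockType(0,                        // localBlockNo
                            3,                        // globalBlockId
                            vtkm::Id3(0, 0, 0),       // origin of this block
                            vtkm::Id3(64, 64, 64),    // local block size
                            vtkm::Id3(128, 128, 128), // global mesh size
                            tree);
// ... use block->IntrinsicVolume / block->DependentVolume during the hypersweep ...
BlockType::destroy(block);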
} // namespace contourtree_distributed
} // namespace worklet
} // namespace vtkm
#endif

@ -1013,7 +1013,7 @@ template <typename FieldType>
// template <typename FieldType, typename VectorType>
std::string HierarchicalContourTreeDotGraphPrint(
const std::string& label, // the label to use as title for the graph
vtkm::worklet::contourtree_distributed::HierarchicalContourTree<FieldType>&
const vtkm::worklet::contourtree_distributed::HierarchicalContourTree<FieldType>&
hierarchicalTree, // the hierarchical contour tree itself
const vtkm::Id showMask = vtkm::worklet::contourtree_distributed::
SHOW_HIERARCHICAL_STANDARD) // mask with flags for what elements to show

@ -66,6 +66,7 @@
#include <vtkm/worklet/contourtree_augmented/ContourTree.h>
#include <vtkm/worklet/contourtree_distributed/HierarchicalContourTree.h>
#include <vtkm/worklet/contourtree_distributed/InteriorForest.h>
#include <vtkm/worklet/contourtree_distributed/tree_grafter/CalculateAttachementCounterWorklet.h>
#include <vtkm/worklet/contourtree_distributed/tree_grafter/CollapseRegularChainsWorklet.h>
#include <vtkm/worklet/contourtree_distributed/tree_grafter/CopyFirstHypernodePerIterationWorklet.h>
#include <vtkm/worklet/contourtree_distributed/tree_grafter/CopyFirstSupernodePerIterationWorklet.h>
@ -267,40 +268,6 @@ public:
private:
/// Used internally to Invoke worklets
vtkm::cont::Invoker Invoke;
/// Internal helper function used to resize ArrayHandles since VTKm does not provide a
/// method to grow a vector without losing the original data values. The input array
/// is modified or replaced
/// @param[in] thearray The 1D array to be resized
/// @param[in] newSize The new size the array should be changed to
/// @param[in] fillValue The value to be used to fill the array
template <typename ValueType>
static void ResizeVector(vtkm::cont::ArrayHandle<ValueType>& thearray,
vtkm::Id newSize,
ValueType fillValue)
{
vtkm::Id oldSize = thearray.GetNumberOfValues();
// Simply return if the size of the array does not change
if (oldSize == newSize)
{
return;
}
// Resize the array but keep the original values
thearray.Allocate(newSize, vtkm::CopyFlag::On);
// Add the fill values to the array if we increased the size of the array
if (oldSize < newSize)
{
vtkm::cont::Algorithm::CopySubRange(
vtkm::cont::ArrayHandleConstant<ValueType>(fillValue, newSize - oldSize), // copy
0, // start copying from first index
newSize - oldSize, // num values to copy
thearray, // target array to copy to
oldSize // start copy to after oldSize
);
}
}
}; // class TreeGrafter
@ -354,7 +321,18 @@ void TreeGrafter<MeshType, FieldType>::GraftInteriorForests(
// count the number of iterations
this->NumTransferIterations = 0;
// Now loop to transfer one iteration at a time
// There are several cases we need to handle properly
// 1. We could have a round with no superarcs to add (in which case we are
// guaranteed not to have attachment points)
// 2. We could have a round with some superarcs but no attachment points
// (because we attach to existing supernodes)
// 3. We could have a round with attachment points to add
// Attachment points are interior, so are never added to the active superarc
// list in the first place. This means that we need to have an extra round
// some of the time to transfer attachment points. So the logic is:
// first we transfer all active superarcs, then we test (somehow) for having
// attachment points to transfer
// Loop to transfer active superarcs with a variation of the PPP transfer phase
// We stop when all that is left are attachment points (which aren't included in the active list)
while (this->ActiveSuperarcs.GetNumberOfValues() > 0)
{ // loop to transfer
@ -381,18 +359,37 @@ void TreeGrafter<MeshType, FieldType>::GraftInteriorForests(
DebugPrint("Finished Transfer Iterations", __FILE__, __LINE__));
#endif
// Now set the transfer iteration for all attachment points
// If there were no supernodes to transfer, their types are all NO_SUCH_ELEMENT
auto setTransferIterationWorklet = vtkm::worklet::contourtree_distributed::tree_grafter::
GraftInteriorForestsSetTransferIterationWorklet(this->NumTransferIterations);
this->Invoke(setTransferIterationWorklet,
this->SupernodeType, // input
this->HierarchicalSuperId, // input
this->WhenTransferred // output
// At this point, we can check whether all supernodes in the residue have already been transferred
// the length of attachmentCounter will be set to this->ContourTree.Supernodes.GetNumberOfValues()
// as a result of the worklet
vtkm::worklet::contourtree_augmented::IdArrayType attachmentCounter;
vtkm::worklet::contourtree_distributed::tree_grafter::CalculateAttachementCounterWorklet
calculateAttachementCounterWorklet;
this->Invoke(calculateAttachementCounterWorklet, // worklet
this->SupernodeType, // input
this->HierarchicalSuperId, // input
attachmentCounter // output
);
// Compute the sum of all values in attachmentCounter. vtkm::Add() is the default (so it could be omitted).
// We include it here to be more explicit about what Reduce does.
vtkm::Id numAttachmentPoints =
vtkm::cont::Algorithm::Reduce(attachmentCounter, static_cast<vtkm::Id>(0), vtkm::Add());
// and increment the number of iterations
this->NumTransferIterations++;
// if there are any at all, we need an extra iteration
if (numAttachmentPoints > 0)
{ // attachment points needing transfer
// Now set the transfer iteration for all attachment points
// If there were no supernodes to transfer, their types are all NO_SUCH_ELEMENT
auto setTransferIterationWorklet = vtkm::worklet::contourtree_distributed::tree_grafter::
GraftInteriorForestsSetTransferIterationWorklet(this->NumTransferIterations);
this->Invoke(setTransferIterationWorklet,
this->SupernodeType, // input
this->HierarchicalSuperId, // input
this->WhenTransferred // output
);
// and increment the number of iterations
this->NumTransferIterations++;
} // attachment points needing transfer
#ifdef DEBUG_PRINT
VTKM_LOG_S(vtkm::cont::LogLevel::Info,
@ -575,7 +572,8 @@ void TreeGrafter<MeshType, FieldType>::InitializeActiveSuperarcs()
// vtkm::cont::Algorithm::ScanInclusive(activeSuperarcId , activeSuperarcId);
}
// the final element will hold the result
vtkm::Id nFree = activeSuperarcId.ReadPortal().Get(activeSuperarcId.GetNumberOfValues() - 1);
vtkm::Id nFree =
vtkm::cont::ArrayGetValue(activeSuperarcId.GetNumberOfValues() - 1, activeSuperarcId);
// TODO FIX nFree is 0 here. Check that this is correct. I believe it should be non-zero.
// resize the active list accordingly
this->ActiveSuperarcs.Allocate(nFree);
@ -632,9 +630,10 @@ template <typename MeshType, typename FieldType>
void TreeGrafter<MeshType, FieldType>::FindCriticalPoints()
{ // FindCriticalPoints()
// allocate memory for type of supernode
this->ResizeVector(this->SupernodeType,
this->ContourTree.Supernodes.GetNumberOfValues(),
vtkm::worklet::contourtree_augmented::NO_SUCH_ELEMENT);
vtkm::worklet::contourtree_augmented::ResizeVector(
this->SupernodeType,
this->ContourTree.Supernodes.GetNumberOfValues(),
vtkm::worklet::contourtree_augmented::NO_SUCH_ELEMENT);
// Reset the UpNeighbour and DownNeighbour array
vtkm::cont::Algorithm::Copy(
vtkm::cont::make_ArrayHandleConstant(vtkm::worklet::contourtree_augmented::NO_SUCH_ELEMENT,
@ -1082,13 +1081,15 @@ void TreeGrafter<MeshType, FieldType>::CopyNewHypernodes(
{
// Resize array to length totalNHypernodes and fill new values with NO_SUCH_ELEMENT (or 0) (while keeping original values)
// NOTE: hierarchicalTree.Superchildren is initialized here but not used by this function
this->ResizeVector<vtkm::Id>(hierarchicalTree.Hypernodes,
totalNHypernodes,
vtkm::worklet::contourtree_augmented::NO_SUCH_ELEMENT);
this->ResizeVector<vtkm::Id>(hierarchicalTree.Hyperarcs,
totalNHypernodes,
vtkm::worklet::contourtree_augmented::NO_SUCH_ELEMENT);
this->ResizeVector<vtkm::Id>(
vtkm::worklet::contourtree_augmented::ResizeVector<vtkm::Id>(
hierarchicalTree.Hypernodes,
totalNHypernodes,
vtkm::worklet::contourtree_augmented::NO_SUCH_ELEMENT);
vtkm::worklet::contourtree_augmented::ResizeVector<vtkm::Id>(
hierarchicalTree.Hyperarcs,
totalNHypernodes,
vtkm::worklet::contourtree_augmented::NO_SUCH_ELEMENT);
vtkm::worklet::contourtree_augmented::ResizeVector<vtkm::Id>(
hierarchicalTree.Superchildren, totalNHypernodes, static_cast<vtkm::Id>(0));
}
// B. Copy in the hypernodes & hyperarcs
@ -1133,32 +1134,39 @@ void TreeGrafter<MeshType, FieldType>::CopyNewSupernodes(
vtkm::Id nNewSupernodes = this->NewSupernodes.GetNumberOfValues();
vtkm::Id totalNSupernodes = nOldSupernodes + nNewSupernodes;
// Resize array to length totalNHypernodes and fill new values with NO_SUCH_ELEMENT (while keeping original values)
this->ResizeVector<vtkm::Id>(hierarchicalTree.Supernodes,
totalNSupernodes,
vtkm::worklet::contourtree_augmented::NO_SUCH_ELEMENT);
this->ResizeVector<vtkm::Id>(hierarchicalTree.Superarcs,
totalNSupernodes,
vtkm::worklet::contourtree_augmented::NO_SUCH_ELEMENT);
this->ResizeVector<vtkm::Id>(hierarchicalTree.Hyperparents,
totalNSupernodes,
vtkm::worklet::contourtree_augmented::NO_SUCH_ELEMENT);
this->ResizeVector<vtkm::Id>(hierarchicalTree.Super2Hypernode,
totalNSupernodes,
vtkm::worklet::contourtree_augmented::NO_SUCH_ELEMENT);
this->ResizeVector<vtkm::Id>(hierarchicalTree.WhichRound,
totalNSupernodes,
vtkm::worklet::contourtree_augmented::NO_SUCH_ELEMENT);
this->ResizeVector<vtkm::Id>(hierarchicalTree.WhichIteration,
totalNSupernodes,
vtkm::worklet::contourtree_augmented::NO_SUCH_ELEMENT);
vtkm::worklet::contourtree_augmented::ResizeVector<vtkm::Id>(
hierarchicalTree.Supernodes,
totalNSupernodes,
vtkm::worklet::contourtree_augmented::NO_SUCH_ELEMENT);
vtkm::worklet::contourtree_augmented::ResizeVector<vtkm::Id>(
hierarchicalTree.Superarcs,
totalNSupernodes,
vtkm::worklet::contourtree_augmented::NO_SUCH_ELEMENT);
vtkm::worklet::contourtree_augmented::ResizeVector<vtkm::Id>(
hierarchicalTree.Hyperparents,
totalNSupernodes,
vtkm::worklet::contourtree_augmented::NO_SUCH_ELEMENT);
vtkm::worklet::contourtree_augmented::ResizeVector<vtkm::Id>(
hierarchicalTree.Super2Hypernode,
totalNSupernodes,
vtkm::worklet::contourtree_augmented::NO_SUCH_ELEMENT);
vtkm::worklet::contourtree_augmented::ResizeVector<vtkm::Id>(
hierarchicalTree.WhichRound,
totalNSupernodes,
vtkm::worklet::contourtree_augmented::NO_SUCH_ELEMENT);
vtkm::worklet::contourtree_augmented::ResizeVector<vtkm::Id>(
hierarchicalTree.WhichIteration,
totalNSupernodes,
vtkm::worklet::contourtree_augmented::NO_SUCH_ELEMENT);
// we will need this here, since we need to set some new superparents here for supernodes added
vtkm::Id nOldNodes = hierarchicalTree.RegularNodeGlobalIds.GetNumberOfValues();
vtkm::Id nNewNodes = this->NewNodes.GetNumberOfValues();
vtkm::Id totalNNodes = nOldNodes + nNewNodes;
this->ResizeVector<vtkm::Id>(hierarchicalTree.Superparents,
totalNNodes,
vtkm::worklet::contourtree_augmented::NO_SUCH_ELEMENT);
vtkm::worklet::contourtree_augmented::ResizeVector<vtkm::Id>(
hierarchicalTree.Superparents,
totalNNodes,
vtkm::worklet::contourtree_augmented::NO_SUCH_ELEMENT);
// B. Copy in the supernodes, &c.
auto copyNewSupernodesWorklet =
@ -1249,9 +1257,10 @@ void TreeGrafter<MeshType, FieldType>::CopyNewNodes(
vtkm::Id totalNNodes = nOldNodes + nNewNodes;
// A. We start by finding & copying the global IDs for every regular node
this->ResizeVector<vtkm::Id>(hierarchicalTree.RegularNodeGlobalIds,
totalNNodes,
vtkm::worklet::contourtree_augmented::NO_SUCH_ELEMENT);
vtkm::worklet::contourtree_augmented::ResizeVector<vtkm::Id>(
hierarchicalTree.RegularNodeGlobalIds,
totalNNodes,
vtkm::worklet::contourtree_augmented::NO_SUCH_ELEMENT);
// NOTE: The original code created a separate array newNodesGloablId that was set
// to NO_SUCH_ELEMENT first but we should only need the fancy array here and save the memory
auto newNodesGloablId =
@ -1292,9 +1301,10 @@ void TreeGrafter<MeshType, FieldType>::CopyNewNodes(
// C. Then we add the new array indices to the sort and resort it
// Resize and initialize hierarchicalTree.RegularNodeSortOrder with NO_SUCH_ELEMENT
// TODO: We should be able to shortcut this since the last values are set next in the CopySubrange
this->ResizeVector<vtkm::Id>(hierarchicalTree.RegularNodeSortOrder,
totalNNodes,
vtkm::worklet::contourtree_augmented::NO_SUCH_ELEMENT);
vtkm::worklet::contourtree_augmented::ResizeVector<vtkm::Id>(
hierarchicalTree.RegularNodeSortOrder,
totalNNodes,
vtkm::worklet::contourtree_augmented::NO_SUCH_ELEMENT);
{
// Do the following: std::iota(hierarchicalTree.regularNodeSortOrder.begin() + nOldNodes, hierarchicalTree.regularNodeSortOrder.end(), nOldNodes);
auto tempCountingArray = vtkm::cont::ArrayHandleCounting<vtkm::Id>(
@ -1320,9 +1330,10 @@ void TreeGrafter<MeshType, FieldType>::CopyNewNodes(
// D. now loop through the supernodes to set their lookup index from regular IDs
// Resize and initialize hierarchicalTree.Regular2Supernode with NO_SUCH_ELEMENT
this->ResizeVector<vtkm::Id>(hierarchicalTree.Regular2Supernode,
totalNNodes,
vtkm::worklet::contourtree_augmented::NO_SUCH_ELEMENT);
vtkm::worklet::contourtree_augmented::ResizeVector<vtkm::Id>(
hierarchicalTree.Regular2Supernode,
totalNNodes,
vtkm::worklet::contourtree_augmented::NO_SUCH_ELEMENT);
{
// The code in this block does the following in serial
// for (indexType newSupernode = hierarchicalTree.supernodes.size() - newSupernodes.size(); newSupernode < hierarchicalTree.supernodes.size(); newSupernode++)
@ -1391,14 +1402,15 @@ void TreeGrafter<MeshType, FieldType>::CopyIterationDetails(
#endif
// update the round counts
hierarchicalTree.NumRegularNodesInRound.WritePortal().Set(theRound,
this->NewNodes.GetNumberOfValues());
hierarchicalTree.NumSupernodesInRound.WritePortal().Set(theRound,
this->NewSupernodes.GetNumberOfValues());
hierarchicalTree.NumHypernodesInRound.WritePortal().Set(theRound,
this->NewHypernodes.GetNumberOfValues());
// the -1 is because the last iteration is just setting attachment points
hierarchicalTree.NumIterations.WritePortal().Set(theRound, this->NumTransferIterations - 1);
vtkm::worklet::contourtree_augmented::IdArraySetValue(
theRound, this->NewNodes.GetNumberOfValues(), hierarchicalTree.NumRegularNodesInRound);
vtkm::worklet::contourtree_augmented::IdArraySetValue(
theRound, this->NewSupernodes.GetNumberOfValues(), hierarchicalTree.NumSupernodesInRound);
vtkm::worklet::contourtree_augmented::IdArraySetValue(
theRound, this->NewHypernodes.GetNumberOfValues(), hierarchicalTree.NumHypernodesInRound);
// the last iteration just sets attachment points, but we now include it, so the previous -1 is no longer subtracted
vtkm::worklet::contourtree_augmented::IdArraySetValue(
theRound, this->NumTransferIterations, hierarchicalTree.NumIterations);
#ifdef DEBUG_PRINT
VTKM_LOG_S(vtkm::cont::LogLevel::Info,
@ -1414,7 +1426,6 @@ void TreeGrafter<MeshType, FieldType>::CopyIterationDetails(
vtkm::Id nOldHypernodes = nTotalHypernodes - nNewHypernodes;
#ifdef DEBUG_PRINT
// TODO: Hamish, why do we need this debug print? It looks like the hierarchical tree does not change.
VTKM_LOG_S(vtkm::cont::LogLevel::Info,
hierarchicalTree.DebugPrint("Node Counts Retrieved", __FILE__, __LINE__));
VTKM_LOG_S(vtkm::cont::LogLevel::Info,
@ -1422,9 +1433,9 @@ void TreeGrafter<MeshType, FieldType>::CopyIterationDetails(
#endif
// and set the per round iteration counts. There may be smarter ways of doing this, but . . .
this->ResizeVector<vtkm::Id>(
vtkm::worklet::contourtree_augmented::ResizeVector<vtkm::Id>(
hierarchicalTree.FirstSupernodePerIteration[static_cast<std::size_t>(theRound)],
this->NumTransferIterations,
this->NumTransferIterations + 1,
vtkm::worklet::contourtree_augmented::NO_SUCH_ELEMENT);
{
auto copyFirstSupernodePerIterationWorklet =
@ -1438,6 +1449,13 @@ void TreeGrafter<MeshType, FieldType>::CopyIterationDetails(
hierarchicalTree.WhichIteration, // input
hierarchicalTree.FirstSupernodePerIteration[static_cast<std::size_t>(theRound)] // output.
);
// force the extra one to be one-off-the end for safety
vtkm::worklet::contourtree_augmented::IdArraySetValue(
this->NumTransferIterations, // index to set
hierarchicalTree.Supernodes.GetNumberOfValues(), // value to set
hierarchicalTree.FirstSupernodePerIteration[theRound] // array to modify
);
}
#ifdef DEBUG_PRINT_GRAFT_RESIDUE
@ -1445,11 +1463,16 @@ void TreeGrafter<MeshType, FieldType>::CopyIterationDetails(
hierarchicalTree.DebugPrint("Supernode Iteration Counts Set", __FILE__, __LINE__));
#endif
// Initialize hierarchicalTree.FirstHypernodePerIteration with NO_SUCH_ELEMENT
this->ResizeVector<vtkm::Id>(
// we add one so we don't need special cases when establishing subranges
// There's a tricky case to be dealt with due to attachment points: the last (extra) iteration transfers supernodes
// with a "virtual" superarc but no hyperarc. This can only occur in the final iteration, in which case the correct value is
// the "off the end" sentinel. But it is also possible that there are no attachment points, in which case the final iteration
// will have some other value. Also, we need to set the "off the end" value for the extra entry in any event.
// THEREFORE, instead of initializing to NO_SUCH_ELEMENT for safety, we initialize to hypernodes.size()
vtkm::worklet::contourtree_augmented::ResizeVector<vtkm::Id>(
hierarchicalTree.FirstHypernodePerIteration[static_cast<std::size_t>(theRound)],
this->NumTransferIterations,
vtkm::worklet::contourtree_augmented::NO_SUCH_ELEMENT);
this->NumTransferIterations + 1,
hierarchicalTree.Hypernodes.GetNumberOfValues());
// copy the appropriate hierarchicalTree.FirstHypernodePerIteration values
{
auto copyFirstHypernodePerIterationWorklet =
@ -1466,10 +1489,6 @@ void TreeGrafter<MeshType, FieldType>::CopyIterationDetails(
);
}
// force the extra one to be one-off-the end for safety
hierarchicalTree.FirstHypernodePerIteration[static_cast<size_t>(theRound)].WritePortal().Set(
this->NumTransferIterations - 1, hierarchicalTree.Hypernodes.GetNumberOfValues());
#ifdef DEBUG_PRINT
VTKM_LOG_S(vtkm::cont::LogLevel::Info,
hierarchicalTree.DebugPrint("Hypernode Iteration Counts Set", __FILE__, __LINE__));
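
The sentinel entries written above make each per-iteration array one entry longer than the number of iterations, so an iteration's supernode or hypernode range can be read as [first[i], first[i+1]) without special-casing the last iteration. A minimal standalone sketch of that offsets-plus-sentinel pattern (plain C++, no VTK-m types; the array contents are made up for illustration):

#include <cassert>
#include <iostream>
#include <vector>

int main()
{
  // firstSupernodePerIteration with one extra "off the end" sentinel entry,
  // analogous to resizing to NumTransferIterations + 1 above.
  // Iterations 0..2 own supernodes [0,4), [4,7), [7,9); the sentinel is 9.
  std::vector<int> firstSupernodePerIteration{ 0, 4, 7, 9 };
  std::size_t numIterations = firstSupernodePerIteration.size() - 1;

  for (std::size_t iteration = 0; iteration < numIterations; ++iteration)
  {
    int begin = firstSupernodePerIteration[iteration];
    int end = firstSupernodePerIteration[iteration + 1]; // no special case for the last iteration
    std::cout << "iteration " << iteration << ": supernodes [" << begin << ", " << end << ")\n";
  }
  assert(firstSupernodePerIteration.back() == 9); // sentinel equals the total number of supernodes
  return 0;
}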

@ -0,0 +1,202 @@
//============================================================================
// Copyright (c) Kitware, Inc.
// All rights reserved.
// See LICENSE.txt for details.
//
// This software is distributed WITHOUT ANY WARRANTY; without even
// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the above copyright notice for more information.
//============================================================================
// Copyright (c) 2018, The Regents of the University of California, through
// Lawrence Berkeley National Laboratory (subject to receipt of any required approvals
// from the U.S. Dept. of Energy). All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// (1) Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// (2) Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// (3) Neither the name of the University of California, Lawrence Berkeley National
// Laboratory, U.S. Dept. of Energy nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
// IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
//
//=============================================================================
//
// COMMENTS:
//
// A comparator that sorts supernode pairs by:
// 1. the superparent (ie the superarc into which an attachment point inserts)
// note that this implicitly sorts on round of insertion as well
// 2. data value
// 3. global regular ID
//
// The superparent is assumed to have a flag indicating ascending/descending, and this
// needs to be used to get the correct inwards ordering along each superarc
//
//=======================================================================================
#ifndef vtk_m_worklet_contourtree_distributed_hierarchical_hyper_augmenter_attachment_and_supernode_comparator_h
#define vtk_m_worklet_contourtree_distributed_hierarchical_hyper_augmenter_attachment_and_supernode_comparator_h
#include <vtkm/cont/ArrayHandle.h>
#include <vtkm/cont/ExecutionObjectBase.h>
#include <vtkm/worklet/contourtree_augmented/Types.h>
namespace vtkm
{
namespace worklet
{
namespace contourtree_distributed
{
namespace hierarchical_augmenter
{
/// Comparator implementation used in HierarchicalAugmenter<FieldType>::ResizeArrays to sort this->SupernodeSorter
/// A comparator that sorts supernode pairs by:
/// 1. the superparent (ie the superarc into which an attachment point inserts)
/// note that this implicitly sorts on round of insertion as well
/// 2. data value
/// 3. global regular ID
///
/// The superparent is assumed to have a flag indicating ascending/descending, and this
/// needs to be used to get the correct inwards ordering along each superarc
template <typename FieldType>
class AttachmentAndSupernodeComparatorImpl
{
public:
using IdArrayPortalType =
typename vtkm::worklet::contourtree_augmented::IdArrayType::ReadPortalType;
using FieldArrayPortalType = typename vtkm::cont::ArrayHandle<FieldType>::ReadPortalType;
// constructor
VTKM_CONT
AttachmentAndSupernodeComparatorImpl(IdArrayPortalType superparentSetPortal,
FieldArrayPortalType dataValueSetPortal,
IdArrayPortalType globalRegularIdSetPortal)
: SuperparentSetPortal(superparentSetPortal)
, DataValueSetPortal(dataValueSetPortal)
, GlobalRegularIdSetPortal(globalRegularIdSetPortal)
{ // constructor
} // constructor
// () operator - gets called to do comparison
VTKM_EXEC
bool operator()(const vtkm::Id& left, const vtkm::Id& right) const
{ // operator()
// first comparison is on superparent WITHOUT ascending descending flag
if (vtkm::worklet::contourtree_augmented::MaskedIndex(this->SuperparentSetPortal.Get(left)) <
vtkm::worklet::contourtree_augmented::MaskedIndex(this->SuperparentSetPortal.Get(right)))
{
return true;
}
if (vtkm::worklet::contourtree_augmented::MaskedIndex(this->SuperparentSetPortal.Get(left)) >
vtkm::worklet::contourtree_augmented::MaskedIndex(this->SuperparentSetPortal.Get(right)))
{
return false;
}
// second comparison is on data value
if (this->DataValueSetPortal.Get(left) < this->DataValueSetPortal.Get(right))
{
return vtkm::worklet::contourtree_augmented::IsAscending(
this->SuperparentSetPortal.Get(left));
}
if (this->DataValueSetPortal.Get(left) > this->DataValueSetPortal.Get(right))
{
return !vtkm::worklet::contourtree_augmented::IsAscending(
this->SuperparentSetPortal.Get(left));
}
// third comparison is on global regular ID
if (this->GlobalRegularIdSetPortal.Get(left) < this->GlobalRegularIdSetPortal.Get(right))
{
return vtkm::worklet::contourtree_augmented::IsAscending(
this->SuperparentSetPortal.Get(left));
}
if (this->GlobalRegularIdSetPortal.Get(left) > this->GlobalRegularIdSetPortal.Get(right))
{
return !vtkm::worklet::contourtree_augmented::IsAscending(
this->SuperparentSetPortal.Get(left));
}
// fall-through (should never happen)
return false;
} // operator()
private:
IdArrayPortalType SuperparentSetPortal;
FieldArrayPortalType DataValueSetPortal;
IdArrayPortalType GlobalRegularIdSetPortal;
}; // AttachmentAndSupernodeComparatorImpl
/// Execution object for the Comparator used in HierarchicalAugmenter<FieldType>::ResizeArrays to sort this->SupernodeSorter
/// A comparator that sorts supernode pairs by:
/// 1. the superparent (ie the superarc into which an attachment point inserts)
/// note that this implicitly sorts on round of insertion as well
/// 2. data value
/// 3. global regular ID
///
/// The superparent is assumed to have a flag indicating ascending/descending, and this
/// needs to be used to get the correct inwards ordering along each superarc
template <typename FieldType>
class AttachmentAndSupernodeComparator : public vtkm::cont::ExecutionObjectBase
{
public:
// constructor - takes vectors as parameters
VTKM_CONT
AttachmentAndSupernodeComparator(
const vtkm::worklet::contourtree_augmented::IdArrayType& superparentSet,
const vtkm::cont::ArrayHandle<FieldType>& dataValueSet,
const vtkm::worklet::contourtree_augmented::IdArrayType& globalRegularIdSet)
: SuperparentSet(superparentSet)
, DataValueSet(dataValueSet)
, GlobalRegularIdSet(globalRegularIdSet)
{ // constructor
} // constructor
/// Create an AttachmentAndSupernodeComparatorImpl object for use in the sort or worklet
VTKM_CONT AttachmentAndSupernodeComparatorImpl<FieldType> PrepareForExecution(
vtkm::cont::DeviceAdapterId device,
vtkm::cont::Token& token) const
{
return AttachmentAndSupernodeComparatorImpl<FieldType>(
this->SuperparentSet.PrepareForInput(device, token),
this->DataValueSet.PrepareForInput(device, token),
this->GlobalRegularIdSet.PrepareForInput(device, token));
}
private:
/// the superparent Id
vtkm::worklet::contourtree_augmented::IdArrayType SuperparentSet;
/// the data value for tiebreak
vtkm::cont::ArrayHandle<FieldType> DataValueSet;
/// the global regular Id for tiebreak
vtkm::worklet::contourtree_augmented::IdArrayType GlobalRegularIdSet;
}; // AttachmentAndSupernodeComparator
} // namespace hierarchical_augmenter
} // namespace contourtree_distributed
} // namespace worklet
} // namespace vtkm
#endif
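
For intuition, the following standalone sketch (plain C++ with std::sort; the struct and field names are illustrative, not VTK-m code) applies the same three-level ordering that AttachmentAndSupernodeComparatorImpl implements above: group by superparent, then order by data value and finally by global regular Id, reversing the direction on descending superarcs so that the points end up ordered inwards along each superarc.

#include <algorithm>
#include <iostream>
#include <vector>

struct AttachmentPoint
{
  int superparent;          // superarc the attachment point inserts into
  bool ascends;             // direction flag carried by the superparent in the real code
  double dataValue;
  long long globalRegularId;
};

int main()
{
  std::vector<AttachmentPoint> points{
    { 0, true, 3.0, 17 }, { 0, true, 1.0, 4 }, { 1, false, 2.0, 9 }, { 1, false, 5.0, 2 }
  };
  std::sort(points.begin(), points.end(),
            [](const AttachmentPoint& left, const AttachmentPoint& right) {
              // 1. superparent (without the direction flag)
              if (left.superparent != right.superparent)
                return left.superparent < right.superparent;
              // 2. data value, reversed on descending superarcs
              if (left.dataValue != right.dataValue)
                return left.ascends ? (left.dataValue < right.dataValue)
                                    : (left.dataValue > right.dataValue);
              // 3. global regular Id as the tiebreak, with the same reversal
              return left.ascends ? (left.globalRegularId < right.globalRegularId)
                                  : (left.globalRegularId > right.globalRegularId);
            });
  // prints the points grouped by superparent, ordered inwards along each superarc
  for (const AttachmentPoint& p : points)
    std::cout << p.superparent << " " << p.dataValue << " " << p.globalRegularId << "\n";
  return 0;
}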

@ -0,0 +1,120 @@
//============================================================================
// Copyright (c) Kitware, Inc.
// All rights reserved.
// See LICENSE.txt for details.
//
// This software is distributed WITHOUT ANY WARRANTY; without even
// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the above copyright notice for more information.
//============================================================================
// Copyright (c) 2018, The Regents of the University of California, through
// Lawrence Berkeley National Laboratory (subject to receipt of any required approvals
// from the U.S. Dept. of Energy). All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// (1) Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// (2) Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// (3) Neither the name of the University of California, Lawrence Berkeley National
// Laboratory, U.S. Dept. of Energy nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
// IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
//
//=============================================================================
#ifndef vtk_m_worklet_contourtree_distributed_hierarchical_hyper_augmenter_attachment_ids_equal_comparator_h
#define vtk_m_worklet_contourtree_distributed_hierarchical_hyper_augmenter_attachment_ids_equal_comparator_h
#include <vtkm/cont/ArrayHandle.h>
#include <vtkm/cont/ExecutionObjectBase.h>
#include <vtkm/worklet/contourtree_augmented/Types.h>
namespace vtkm
{
namespace worklet
{
namespace contourtree_distributed
{
namespace hierarchical_augmenter
{
/// Implementation for a binary predicate that checks whether two attachment
/// points have the same global regular Id
class AttachmentIdsEqualComparatorImpl
{
public:
using IdArrayPortalType =
typename vtkm::worklet::contourtree_augmented::IdArrayType::ReadPortalType;
// constructor
VTKM_CONT
AttachmentIdsEqualComparatorImpl(IdArrayPortalType globalRegularIdsPortal)
: GlobalRegularIdsPortal(globalRegularIdsPortal)
{ // constructor
} // constructor
// () operator - gets called to do comparison
VTKM_EXEC
bool operator()(const vtkm::Id& x, const vtkm::Id& y) const
{ // operator()
return (this->GlobalRegularIdsPortal.Get(x) == this->GlobalRegularIdsPortal.Get(y));
} // operator()
private:
IdArrayPortalType GlobalRegularIdsPortal;
}; // AttachmentIdsEqualComparatorImpl
/// Execution object for a binary predicate that checks whether two attachment
/// points have the same global regular Id
class AttachmentIdsEqualComparator : public vtkm::cont::ExecutionObjectBase
{
public:
// constructor - takes vectors as parameters
VTKM_CONT
AttachmentIdsEqualComparator(
const vtkm::worklet::contourtree_augmented::IdArrayType globalRegularIds)
: GlobalRegularIds(globalRegularIds)
{ // constructor
} // constructor
/// Create an AttachmentIdsEqualComparatorImpl object for use in the sort or worklet
VTKM_CONT AttachmentIdsEqualComparatorImpl PrepareForExecution(vtkm::cont::DeviceAdapterId device,
vtkm::cont::Token& token) const
{
return AttachmentIdsEqualComparatorImpl(this->GlobalRegularIds.PrepareForInput(device, token));
}
private:
vtkm::worklet::contourtree_augmented::IdArrayType GlobalRegularIds;
}; // AttachmentIdsEqualComparator
} // namespace hierarchical_augmenter
} // namespace contourtree_distributed
} // namespace worklet
} // namespace vtkm
#endif
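
Since the predicate above compares only the global regular Ids, it presumably serves to collapse duplicate attachment points once they have been sorted together. The real comparator works on indices through array portals; the standalone sketch below (plain C++, illustrative values) compares the values directly and uses std::unique in place of the corresponding device algorithm.

#include <algorithm>
#include <iostream>
#include <vector>

int main()
{
  // Sorted global regular Ids of incoming attachment points; duplicates occur when
  // the same point arrives from several blocks.
  std::vector<long long> globalRegularIds{ 3, 3, 7, 9, 9, 9, 12 };
  auto newEnd = std::unique(globalRegularIds.begin(), globalRegularIds.end(),
                            [](long long x, long long y) { return x == y; });
  globalRegularIds.erase(newEnd, globalRegularIds.end());
  for (long long id : globalRegularIds)
    std::cout << id << " "; // prints: 3 7 9 12
  std::cout << "\n";
  return 0;
}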

@ -0,0 +1,209 @@
//============================================================================
// Copyright (c) Kitware, Inc.
// All rights reserved.
// See LICENSE.txt for details.
//
// This software is distributed WITHOUT ANY WARRANTY; without even
// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the above copyright notice for more information.
//============================================================================
// Copyright (c) 2018, The Regents of the University of California, through
// Lawrence Berkeley National Laboratory (subject to receipt of any required approvals
// from the U.S. Dept. of Energy). All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// (1) Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// (2) Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// (3) Neither the name of the University of California, Lawrence Berkeley National
// Laboratory, U.S. Dept. of Energy nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
// IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
//
//=============================================================================
//
// COMMENTS:
//
// A comparator that sorts supernode pairs by:
// 1. the superparent round
// 2. global regular Id
// 3. supernode Id (if any)
//
// We don't care about the orientation of the superarc for this comparator
//
// For duplicates, we assume that at MOST one (in fact, it should always be EXACTLY one)
// copy has a supernode Id set. This is because when we exchange between blocks, we set
// the supernode Id to NO_SUCH_ELEMENT. That way, only the copy that belongs on the block
// has the supernode Id set. We want to ensure that it appears at the beginning of the segment,
// and don't care about the ordering of any others.
//
//=======================================================================================
#ifndef vtk_m_worklet_contourtree_distributed_hierarchical_hyper_augmenter_attachment_superparent_and_index_comparator_h
#define vtk_m_worklet_contourtree_distributed_hierarchical_hyper_augmenter_attachment_superparent_and_index_comparator_h
#include <vtkm/cont/ArrayHandle.h>
#include <vtkm/cont/ExecutionObjectBase.h>
#include <vtkm/worklet/contourtree_augmented/Types.h>
namespace vtkm
{
namespace worklet
{
namespace contourtree_distributed
{
namespace hierarchical_augmenter
{
/// Implementation for a comparator that sorts supernode pairs by:
/// 1. the superparent round
/// 2. global regular Id
/// 3. supernode Id (if any)
class AttachmentSuperparentAndIndexComparatorImpl
{
public:
using IdArrayPortalType =
typename vtkm::worklet::contourtree_augmented::IdArrayType::ReadPortalType;
// constructor
VTKM_CONT
AttachmentSuperparentAndIndexComparatorImpl(IdArrayPortalType superparentsPortal,
IdArrayPortalType globalRegularIdsPortal,
IdArrayPortalType supernodeIdsPortal)
: SuperparentsPortal(superparentsPortal)
, GlobalRegularIdsPortal(globalRegularIdsPortal)
, SupernodeIdsPortal(supernodeIdsPortal)
{ // constructor
} // constructor
// () operator - gets called to do comparison
VTKM_EXEC
bool operator()(const vtkm::Id& left, const vtkm::Id& right) const
{ // operator()
// optimisation for sorts which compare an element with itself
// if the element compares with itself, always return false (it's not less than itself)
if (left == right)
{
return false;
}
// first comparison is on superparent WITHOUT ascending descending flag
if (vtkm::worklet::contourtree_augmented::MaskedIndex(this->SuperparentsPortal.Get(left)) <
vtkm::worklet::contourtree_augmented::MaskedIndex(this->SuperparentsPortal.Get(right)))
{
return true;
}
if (vtkm::worklet::contourtree_augmented::MaskedIndex(this->SuperparentsPortal.Get(left)) >
vtkm::worklet::contourtree_augmented::MaskedIndex(this->SuperparentsPortal.Get(right)))
{
return false;
}
// second comparison is on global regular Id
if (this->GlobalRegularIdsPortal.Get(left) < this->GlobalRegularIdsPortal.Get(right))
{
return vtkm::worklet::contourtree_augmented::IsAscending(this->SuperparentsPortal.Get(left));
}
if (this->GlobalRegularIdsPortal.Get(left) > this->GlobalRegularIdsPortal.Get(right))
{
return !vtkm::worklet::contourtree_augmented::IsAscending(this->SuperparentsPortal.Get(left));
}
// it now depends on whether they have actual IDs (ie they are on this block anyway)
if (vtkm::worklet::contourtree_augmented::NoSuchElement(this->SupernodeIdsPortal.Get(left)))
{ // left does not exist
if (vtkm::worklet::contourtree_augmented::NoSuchElement(this->SupernodeIdsPortal.Get(right)))
{ // right does not exist
// neither exists: sort on input indices instead
return (left < right);
} // right does not exist
else
{ // right does exist
// right exists but left doesn't - sort right lower
return false;
} // right does exist
} // left does not exist
else
{ // left does exist
if (vtkm::worklet::contourtree_augmented::NoSuchElement(this->SupernodeIdsPortal.Get(right)))
{ // right does not exist
// left exists but right doesn't - sort left lower
return true;
} // right does not exist
else
{ // right does exist
// both exist
return (this->SupernodeIdsPortal.Get(left) < this->SupernodeIdsPortal.Get(right));
} // right does exist
} // left does exist
} // operator()
private:
IdArrayPortalType SuperparentsPortal;
IdArrayPortalType GlobalRegularIdsPortal;
IdArrayPortalType SupernodeIdsPortal;
}; // AttachmentSuperparentAndIndexComparatorImpl
/// Execution object for a comparator that sorts supernode pairs by:
/// 1. the superparent round
/// 2. global regular Id
/// 3. supernode Id (if any)
class AttachmentSuperparentAndIndexComparator : public vtkm::cont::ExecutionObjectBase
{
public:
// constructor - takes vectors as parameters
VTKM_CONT
AttachmentSuperparentAndIndexComparator(
const vtkm::worklet::contourtree_augmented::IdArrayType superparents,
const vtkm::worklet::contourtree_augmented::IdArrayType globalRegularIds,
const vtkm::worklet::contourtree_augmented::IdArrayType supernodeIds)
: Superparents(superparents)
, GlobalRegularIds(globalRegularIds)
, SupernodeIds(supernodeIds)
{ // constructor
} // constructor
/// Create an AttachmentSuperparentAndIndexComparatorImpl object for use in the sort or worklet
VTKM_CONT AttachmentSuperparentAndIndexComparatorImpl
PrepareForExecution(vtkm::cont::DeviceAdapterId device, vtkm::cont::Token& token) const
{
return AttachmentSuperparentAndIndexComparatorImpl(
this->Superparents.PrepareForInput(device, token),
this->GlobalRegularIds.PrepareForInput(device, token),
this->SupernodeIds.PrepareForInput(device, token));
}
private:
/// the superparent Id
vtkm::worklet::contourtree_augmented::IdArrayType Superparents;
/// the global regular Id for tiebreak
vtkm::worklet::contourtree_augmented::IdArrayType GlobalRegularIds;
/// the supernode Id for tiebreak
vtkm::worklet::contourtree_augmented::IdArrayType SupernodeIds;
}; // AttachmentSuperparentAndIndexComparator
} // namespace hierarchical_augmenter
} // namespace contourtree_distributed
} // namespace worklet
} // namespace vtkm
#endif
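
The distinctive part of this comparator is the NO_SUCH_ELEMENT tiebreak: of all copies of an attachment point that share a global regular Id, only the copy owned by the block carries a supernode Id, and the ordering pushes that copy to the front of its segment. The standalone sketch below shows just that behaviour (plain C++; the NO_SUCH_ELEMENT stand-in, struct, and data are illustrative, and the superparent/direction levels of the real comparator are omitted).

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

// Stand-in for vtkm::worklet::contourtree_augmented::NO_SUCH_ELEMENT (illustrative value only).
constexpr std::int64_t NO_SUCH_ELEMENT = -1;

struct SharedAttachmentPoint
{
  std::int64_t globalRegularId; // same for all copies of one attachment point
  std::int64_t supernodeId;     // set only on the copy that belongs to this block
};

int main()
{
  std::vector<SharedAttachmentPoint> copies{
    { 42, NO_SUCH_ELEMENT }, { 42, 7 }, { 42, NO_SUCH_ELEMENT }, { 17, 3 }
  };
  std::sort(copies.begin(), copies.end(),
            [](const SharedAttachmentPoint& left, const SharedAttachmentPoint& right) {
              if (left.globalRegularId != right.globalRegularId)
                return left.globalRegularId < right.globalRegularId;
              // the copy that has a supernode Id sorts to the front of its segment
              if (left.supernodeId == NO_SUCH_ELEMENT)
                return false;
              if (right.supernodeId == NO_SUCH_ELEMENT)
                return true;
              return left.supernodeId < right.supernodeId;
            });
  for (const SharedAttachmentPoint& c : copies)
    std::cout << c.globalRegularId << ":" << c.supernodeId << "  "; // 17:3  42:7  42:-1  42:-1
  std::cout << "\n";
  return 0;
}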

@ -0,0 +1,29 @@
##============================================================================
## Copyright (c) Kitware, Inc.
## All rights reserved.
## See LICENSE.txt for details.
##
## This software is distributed WITHOUT ANY WARRANTY; without even
## the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
## PURPOSE. See the above copyright notice for more information.
##============================================================================
set(headers
IsAttachementPointPredicate.h
IsAscendingDecorator.h
IsAttachementPointNeededPredicate.h
AttachmentSuperparentAndIndexComparator.h
AttachmentIdsEqualComparator.h
SetFirstAttachmentPointInRoundWorklet.h
UpdateHyperstructureSetHyperarcsAndNodesWorklet.h
UpdateHyperstructureSetSuperchildrenWorklet.h
FindSuperparentForNecessaryNodesWorklet.h
CopyBaseRegularStructureWorklet.h
SetSuperparentSetDecorator.h
AttachmentAndSupernodeComparator.h
ResizeArraysBuildNewSupernodeIdsWorklet.h
CreateSuperarcsWorklet.h
HierarchicalAugmenterInOutData.h
)
vtkm_declare_headers(${headers})

@ -0,0 +1,166 @@
//============================================================================
// Copyright (c) Kitware, Inc.
// All rights reserved.
// See LICENSE.txt for details.
//
// This software is distributed WITHOUT ANY WARRANTY; without even
// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the above copyright notice for more information.
//============================================================================
// Copyright (c) 2018, The Regents of the University of California, through
// Lawrence Berkeley National Laboratory (subject to receipt of any required approvals
// from the U.S. Dept. of Energy). All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// (1) Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// (2) Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// (3) Neither the name of the University of California, Lawrence Berkeley National
// Laboratory, U.S. Dept. of Energy nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
// IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
//
//=============================================================================
// The PPP2 algorithm and software were jointly developed by
// Hamish Carr (University of Leeds), Gunther H. Weber (LBNL), and
// Oliver Ruebel (LBNL)
//==============================================================================
#ifndef vtk_m_worklet_contourtree_distributed_hierarchical_augmenter_copy_base_regular_structure_worklet_h
#define vtk_m_worklet_contourtree_distributed_hierarchical_augmenter_copy_base_regular_structure_worklet_h
#include <vtkm/worklet/WorkletMapField.h>
#include <vtkm/worklet/contourtree_augmented/Types.h>
namespace vtkm
{
namespace worklet
{
namespace contourtree_distributed
{
namespace hierarchical_augmenter
{
/// Worklet used in HierarchicalAugmenter::CopyBaseRegularStructure for
/// finding the superparent for each node needed
class CopyBaseRegularStructureWorklet : public vtkm::worklet::WorkletMapField
{
public:
/// Control signature for the worklet
/// NOTE: We require the input arrays (aside from the input domain) to be permuted by the
/// regularNodesNeeded input domain so that we can use FieldIn instead of WholeArrayIn
/// NOTE: We require ArrayHandleView for the output arrays of the range [numExistingRegular:end] so
/// that we can use FieldOut instead of requiring WholeArrayInOut
using ControlSignature = void(
FieldIn
regularNodesNeededRange, // input domain ArrayHandleIndex of [0, regularNodesNeeded.GetNumberOfValues()]
FieldIn
baseTreeRegularNodeGlobalIdsPermuted, // input baseTree->regularNodeGlobalIds permuted by regularNodesNeeded
FieldIn baseTreeDataValuesPermuted, // input baseTree->dataValues permuted by regularNodesNeeded
FieldIn regularSuperparentsPermuted, // input regularSuperparents permuted by regularNodesNeeded
FieldOut
augmentedTreeRegularNodeGlobalIdsView, // output view of augmentedTree->regularNodeGlobalIds[numExistingRegular:]
FieldOut
augmentedTreeDataValuesView, // output view of augmentedTree->dataValues[numExistingRegular:]
FieldOut
augmentedTreeSuperparentsView, // output view of augmentedTree->superparents[numExistingRegular:]
FieldOut
augmentedTreeRegularNodeSortOrderView // output view of augmentedTree->regularNodeSortOrder[numExistingRegular:]
);
using ExecutionSignature = void(_1, _2, _3, _4, _5, _6, _7, _8);
using InputDomain = _1;
/// Default Constructor
VTKM_EXEC_CONT
CopyBaseRegularStructureWorklet(const vtkm::Id& numExistingRegular)
: NumExistingRegular(numExistingRegular)
{
}
/// operator() of the worklet
template <typename FieldType>
VTKM_EXEC void operator()(
const vtkm::Id& neededRegNode, // InputIndex in [0, regularNodesNeeded.GetNumberOfValues()]
const vtkm::Id&
baseTreeRegularNodeGlobalId, // same as baseTree->regularNodeGlobalIds[oldRegularID]
const FieldType& baseTreeDataValue, // same as baseTree->dataValues[oldRegularID]
const vtkm::Id& regularSuperparentsValue, // same regularSuperparents[oldRegularID]
vtkm::Id&
augmentedTreeRegularNodeGlobalIdValue, // same as augmentedTree->regularNodeGlobalIDs[NumExistingRegular + neededRegNode] = ...
FieldType&
augmentedTreeDataValue, // same as augmentedTree->dataValues[NumExistingRegular + neededRegNode] = ...
vtkm::Id&
augmentedTreeSuperparentsValue, // same as augmentedTree->superparents[NumExistingRegular + neededRegNode] = ...
vtkm::Id&
augmentedTreeRegularNodeSortOrderValue // same as augmentedTree->regularNodeSortOrder [NumExistingRegular + neededRegNode] = ...
) const
{
// per regular node needing addition
// the oldRegularId lookups are already baked into the permuted input arrays
// compute the new regular Id; the output arrays are ArrayHandleViews offset by NumExistingRegular,
// so this absolute Id is only needed for the sort order value set below
vtkm::Id newRegularId = this->NumExistingRegular + neededRegNode;
// now use them to copy data
augmentedTreeRegularNodeGlobalIdValue = baseTreeRegularNodeGlobalId;
augmentedTreeDataValue = baseTreeDataValue;
augmentedTreeSuperparentsValue = regularSuperparentsValue;
// this one is special since we need to resort - set it to identity, leaving the sort order of old vertices alone
// this *MAY* make certain sorts run faster
augmentedTreeRegularNodeSortOrderValue = newRegularId;
// NOTE: we can skip this step since Regular2Supernode is already initialized with NO_SUCH_ELEMENT
// since these are *ALL* only regular nodes, this one's easy:
// augmentedTree->regular2supernode [newRegularID] = NO_SUCH_ELEMENT;
// In serial this worklet implements the following operation
/*
for (vtkm::Id neededRegNode = 0; neededRegNode < nRegNeeded; neededRegNode++)
{ // per regular node needing addition
// retrieve the existing index
vtkm::Id oldRegularID = regularNodesNeeded[neededRegNode];
// and compute the new index
vtkm::Id newRegularID = nExistingRegular + neededRegNode;
// now use them to copy data
augmentedTree->regularNodeGlobalIDs [newRegularID] = baseTree->regularNodeGlobalIDs [oldRegularID];
augmentedTree->dataValues [newRegularID] = baseTree->dataValues [oldRegularID];
augmentedTree->superparents [newRegularID] = regularSuperparents [oldRegularID];
// this one is special since we need to resort - set it to identity, leaving the sort order of old vertices alone
// this *MAY* make certain sorts run faster
augmentedTree->regularNodeSortOrder [newRegularID] = newRegularID;
// since these are *ALL* only regular nodes, this one's easy:
augmentedTree->regular2supernode [newRegularID] = NO_SUCH_ELEMENT;
} // per regular node needing addition
*/
} // operator()()
private:
const vtkm::Id NumExistingRegular;
}; // CopyBaseRegularStructureWorklet
} // namespace hierarchical_augmenter
} // namespace contourtree_distributed
} // namespace worklet
} // namespace vtkm
#endif
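
The notes in the ControlSignature describe a permute-the-inputs / view-the-outputs pattern so that plain FieldIn/FieldOut can be used instead of whole-array access. The sketch below illustrates that dispatch pattern with a trivial copy worklet standing in for CopyBaseRegularStructureWorklet; it assumes, as stated above, that an ArrayHandleView over a preallocated array may be passed as FieldOut, and all variable names are illustrative.

#include <vtkm/cont/ArrayCopy.h>
#include <vtkm/cont/ArrayHandle.h>
#include <vtkm/cont/ArrayHandleConstant.h>
#include <vtkm/cont/ArrayHandleIndex.h>
#include <vtkm/cont/ArrayHandlePermutation.h>
#include <vtkm/cont/ArrayHandleView.h>
#include <vtkm/cont/Invoker.h>
#include <vtkm/worklet/WorkletMapField.h>
#include <iostream>

// Trivial stand-in worklet: copy one permuted input value to the matching slot of an output view.
struct CopyToTailWorklet : vtkm::worklet::WorkletMapField
{
  using ControlSignature = void(FieldIn neededRange, FieldIn permutedInput, FieldOut viewOutput);
  using ExecutionSignature = void(_2, _3);
  using InputDomain = _1;

  VTKM_EXEC void operator()(const vtkm::Id& inputValue, vtkm::Id& outputValue) const
  {
    outputValue = inputValue;
  }
};

int main()
{
  // base array and the indices of the entries we need to append
  auto baseValues = vtkm::cont::make_ArrayHandle<vtkm::Id>({ 10, 11, 12, 13, 14 });
  auto neededIndices = vtkm::cont::make_ArrayHandle<vtkm::Id>({ 1, 3 });
  vtkm::Id numExisting = 4;
  vtkm::Id numNeeded = neededIndices.GetNumberOfValues();

  // preallocated target array; in the real code the first numExisting entries already hold data
  vtkm::cont::ArrayHandle<vtkm::Id> augmentedValues;
  vtkm::cont::ArrayCopy(vtkm::cont::ArrayHandleConstant<vtkm::Id>(0, numExisting + numNeeded),
                        augmentedValues);

  vtkm::cont::Invoker invoke;
  invoke(CopyToTailWorklet{},
         vtkm::cont::ArrayHandleIndex(numNeeded),                                    // input domain
         vtkm::cont::make_ArrayHandlePermutation(neededIndices, baseValues),         // permuted input
         vtkm::cont::make_ArrayHandleView(augmentedValues, numExisting, numNeeded)); // output view

  auto portal = augmentedValues.ReadPortal();
  for (vtkm::Id i = 0; i < portal.GetNumberOfValues(); ++i)
    std::cout << portal.Get(i) << " "; // prints: 0 0 0 0 11 13
  std::cout << std::endl;
  return 0;
}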

@ -0,0 +1,416 @@
//============================================================================
// Copyright (c) Kitware, Inc.
// All rights reserved.
// See LICENSE.txt for details.
//
// This software is distributed WITHOUT ANY WARRANTY; without even
// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the above copyright notice for more information.
//============================================================================
// Copyright (c) 2018, The Regents of the University of California, through
// Lawrence Berkeley National Laboratory (subject to receipt of any required approvals
// from the U.S. Dept. of Energy). All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// (1) Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// (2) Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// (3) Neither the name of the University of California, Lawrence Berkeley National
// Laboratory, U.S. Dept. of Energy nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
// IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
//
//=============================================================================
// The PPP2 algorithm and software were jointly developed by
// Hamish Carr (University of Leeds), Gunther H. Weber (LBNL), and
// Oliver Ruebel (LBNL)
//==============================================================================
#ifndef vtk_m_worklet_contourtree_distributed_hierarchical_augmenter_create_superarcs_worklet_h
#define vtk_m_worklet_contourtree_distributed_hierarchical_augmenter_create_superarcs_worklet_h
#include <vtkm/worklet/WorkletMapField.h>
#include <vtkm/worklet/contourtree_augmented/Types.h>
namespace vtkm
{
namespace worklet
{
namespace contourtree_distributed
{
namespace hierarchical_augmenter
{
/// Worklet used to implement the main part of HierarchicalAugmenter::CreateSuperarcs
/// Connect superarcs for the level & set hyperparents & superchildren count, whichRound,
/// whichIteration, super2hypernode
class CreateSuperarcsWorklet : public vtkm::worklet::WorkletMapField
{
public:
// TODO: Check if augmentedTreeFirstSupernodePerIteration could be changed to WholeArrayOut or if we need the In to preserve original values
/// Control signature for the worklet
/// @param[in] supernodeSorter input domain. We need access to InputIndex and InputIndex+1,
/// therefore this is a WholeArrayIn transfer.
/// @param[in] superparentSet WholeArrayIn because we need access to superparentSet[supernodeSorter[InputIndex]]
/// and superparentSet[supernodeSorter[InputIndex+1]].
/// @param[in] baseTreeSuperarcs WholeArrayIn because we need access to baseTreeSuperarcsPortal.Get(superparentOldSuperId)
/// While this could be done with fancy array magic, it would require a sequence of multiple
/// fancy arrays and would likely not be cheaper than computing things in the worklet.
/// @param[in] newSupernodeIds WholeArrayIn because we need to access newSupernodeIdsPortal.Get(oldTargetSuperId)
/// where oldTargetSuperId is the unmasked baseTreeSuperarcsPortal.Get(superparentOldSuperId)
/// @param[in] baseTreeSupernodes WholeArrayIn because we need to access baseTreeSupernodesPortal.Get(superparentOldSuperId);
/// @param[in] baseTreeRegularNodeGlobalIds WholeArrayIn because we need to access
/// baseTreeRegularNodeGlobalIdsPortal.Get(superparentOldSuperId);
/// @param[in] globalRegularIdSet FieldIn. Permute globalRegularIdSet with supernodeSorter in order to allow this to be a FieldIn.
/// @param[in] baseTreeSuper2Hypernode WholeArrayIn because we need to access
/// baseTreeSuper2HypernodePortal.Get(superparentOldSuperId)
/// @param[in] baseTreeWhichIteration WholeArrayIn because we need to access baseTreeWhichIterationPortal.Get(superparentOldSuperId)
/// and baseTreeWhichIterationPortal.Get(superparentOldSuperId+1)
/// @param[in] augmentedTreeSuperarcsView output view of this->AugmentedTree->Superarcs with
/// vtkm::cont::make_ArrayHandleView(this->AugmentedTree->Superarcs,
/// numSupernodesAlready, this->SupernodeSorter.GetNumberOfValues()).
/// Using this view allows us to do this one as a FieldOut, and it is effectively the
/// same as accessing the array at the newSupernodeId location.
/// @param[in] augmentedTreeFirstSupernodePerIteration WholeArrayInOut because we need to update multiple locations.
/// In is used to preserve original values. Set to augmentedTree->firstSupernodePerIteration[roundNumber].
/// @param[in] augmentedTreeSuper2hypernode FieldOut. Output view of this->AugmentedTree->Super2Hypernode
/// vtkm::cont::make_ArrayHandleView(this->AugmentedTree->Super2Hypernode,
/// numSupernodesAlready, this->SupernodeSorter.GetNumberOfValues()).
/// Using this view allows us to do this one as a FieldOut, and it is effectively the
/// same as accessing the array at the newSupernodeId location.
using ControlSignature = void(
WholeArrayIn supernodeSorter,
WholeArrayIn superparentSet, // input
WholeArrayIn baseTreeSuperarcs, // input
WholeArrayIn newSupernodeIds, // input
WholeArrayIn baseTreeSupernodes, // input
WholeArrayIn baseTreeRegularNodeGlobalIds, // input
FieldIn globalRegularIdSet, // input
WholeArrayIn baseTreeSuper2Hypernode, // input
WholeArrayIn baseTreeWhichIteration, // input
FieldOut augmentedTreeSuperarcsView, // output
WholeArrayInOut augmentedTreeFirstSupernodePerIteration, // input/output
FieldOut augmentedTreeSuper2hypernode // output
);
using ExecutionSignature = void(InputIndex, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12);
using InputDomain = _1;
/// Default Constructor
/// @param[in] numSupernodesAlready Set to vtkm::cont::ArrayGetValue(0, this->AugmentedTree->FirstSupernodePerIteration[roundNumber]);
/// @param[in] baseTreeNumRounds Set to this->BaseTree->NumRounds
/// @param[in] augmentedTreeNumIterations Set to vtkm::cont::ArrayGetValue(roundNumber, this->AugmentedTree->NumIterations);
/// @param[in] roundNumber Set to the current round
/// @param[in] numAugmentedTreeSupernodes Set to this->AugmentedTree->Supernodes.GetNumberOfValues();
VTKM_EXEC_CONT
CreateSuperarcsWorklet(const vtkm::Id& numSupernodesAlready,
const vtkm::Id& baseTreeNumRounds,
const vtkm::Id& augmentedTreeNumIterations,
const vtkm::Id& roundNumber,
const vtkm::Id& numAugmentedTreeSupernodes)
: NumSupernodesAlready(numSupernodesAlready)
, BaseTreeNumRounds(baseTreeNumRounds)
, AugmentedTreeNumIterations(augmentedTreeNumIterations)
, RoundNumber(roundNumber)
, NumAugmentedTreeSupernodes(numAugmentedTreeSupernodes)
{
}
/// operator() of the worklet
template <typename InFieldPortalType, typename InOutFieldPortalType>
VTKM_EXEC void operator()(
const vtkm::Id& supernode, // InputIndex of supernodeSorter
const InFieldPortalType& supernodeSorterPortal,
const InFieldPortalType& superparentSetPortal,
const InFieldPortalType& baseTreeSuperarcsPortal,
const InFieldPortalType& newSupernodeIdsPortal,
const InFieldPortalType& baseTreeSupernodesPortal,
const InFieldPortalType& baseTreeRegularNodeGlobalIdsPortal,
const vtkm::Id& globalRegularIdSetValue,
const InFieldPortalType& baseTreeSuper2HypernodePortal,
const InFieldPortalType& baseTreeWhichIterationPortal,
vtkm::Id& augmentedTreeSuperarcsValue, // same as augmentedTree->superarcs[newSupernodeId]
const InOutFieldPortalType&
augmentedTreeFirstSupernodePerIterationPortal, // augmentedTree->firstSupernodePerIteration[roundNumber]
vtkm::Id& augmentedTreeSuper2hypernodeValue) const
{
// per supernode in the set
// retrieve the index from the sorting index array
vtkm::Id supernodeSetIndex = supernodeSorterPortal.Get(supernode);
// work out the new supernode Id. We have this defined on the outside as a fancy array handle,
// however, using the fancy handle here would not really make a performance difference and
// computing it here is more readable
vtkm::Id newSupernodeId = this->NumSupernodesAlready + supernode;
// NOTE: The newRegularId is no longer needed here since all parts
// that used it in the worklet have been moved outside
// vtkm::Id newRegularId = newSupernodeId;
// NOTE: This part has been moved out of the worklet and is performed using standard vtkm copy constructs
// // setting the supernode's regular Id is now trivial
// augmentedTreeSupernodesPortal.Set(newSupernodeId, newRegularId);
// retrieve the ascending flag from the superparent
vtkm::Id superparentSetVal = superparentSetPortal.Get(supernodeSetIndex);
// get the ascending flag from the parent
bool superarcAscends = vtkm::worklet::contourtree_augmented::IsAscending(superparentSetVal);
// strip the ascending flag from the superparent.
vtkm::Id superparentOldSuperId =
vtkm::worklet::contourtree_augmented::MaskedIndex(superparentSetVal);
// setting the superarc is done the usual way. Our sort routine has ended up
// with the supernodes arranged in either ascending or descending order
// inwards along the parent superarc (as expressed by the superparent Id).
// Each superarc except the last in the segment points to the next one:
// the last one points to the target of the original superarc.
// first test to see if we're the last in the array
if (supernode == supernodeSorterPortal.GetNumberOfValues() - 1)
{ // last in the array
// special case for root of entire tree at end of top level
if (RoundNumber == this->BaseTreeNumRounds)
{
augmentedTreeSuperarcsValue = vtkm::worklet::contourtree_augmented::NO_SUCH_ELEMENT;
}
else
{ // not the tree root
// retrieve the target of the superarc from the base tree (masking to strip out the ascending flag)
vtkm::Id oldTargetSuperId = vtkm::worklet::contourtree_augmented::MaskedIndex(
baseTreeSuperarcsPortal.Get(superparentOldSuperId));
// convert to a new supernode Id
vtkm::Id newTargetSuperId = newSupernodeIdsPortal.Get(oldTargetSuperId);
// add the ascending flag back in and store in the array
augmentedTreeSuperarcsValue = newTargetSuperId |
(superarcAscends ? vtkm::worklet::contourtree_augmented::IS_ASCENDING : 0x00);
} // not the tree root
// since there's an extra entry in the firstSupernode array as a sentinel, set it
augmentedTreeFirstSupernodePerIterationPortal.Set(this->AugmentedTreeNumIterations,
NumAugmentedTreeSupernodes);
} // last in the array
else if (superparentOldSuperId !=
vtkm::worklet::contourtree_augmented::MaskedIndex(
superparentSetPortal.Get(supernodeSorterPortal.Get(supernode + 1))))
{ // last in the segment
// retrieve the target of the superarc from the base tree (masking to strip out the ascending flag)
vtkm::Id oldTargetSuperId = vtkm::worklet::contourtree_augmented::MaskedIndex(
baseTreeSuperarcsPortal.Get(superparentOldSuperId));
// convert to a new supernode Id
vtkm::Id newTargetSuperId = newSupernodeIdsPortal.Get(oldTargetSuperId);
// add the ascending flag back in and store in the array
augmentedTreeSuperarcsValue = newTargetSuperId |
(superarcAscends ? vtkm::worklet::contourtree_augmented::IS_ASCENDING : 0x00);
// since we're the last in the segment, we check to see if we are at the end of an iteration
vtkm::Id iterationNumber = vtkm::worklet::contourtree_augmented::MaskedIndex(
baseTreeWhichIterationPortal.Get(superparentOldSuperId));
vtkm::Id iterationNumberOfNext = vtkm::worklet::contourtree_augmented::MaskedIndex(
baseTreeWhichIterationPortal.Get(superparentOldSuperId + 1));
if (iterationNumber != iterationNumberOfNext)
{ // boundary of iterations
// If so, we set the "firstSupernodePerIteration" for the next
augmentedTreeFirstSupernodePerIterationPortal.Set(iterationNumberOfNext,
newSupernodeId + 1);
} // boundary of iterations
} // last in the segment
else
{ // not last in the segment
// the target is always the next one, so just store it with the ascending flag
augmentedTreeSuperarcsValue = (newSupernodeId + 1) |
(superarcAscends ? vtkm::worklet::contourtree_augmented::IS_ASCENDING : 0x00);
} // not last in the segment
// set the first supernode in the first iteration to the beginning of the round
augmentedTreeFirstSupernodePerIterationPortal.Set(0, this->NumSupernodesAlready);
// NOTE: This part has been moved out of the worklet and is performed using standard vtkm copy constructs
// // setting the hyperparent is straightforward since the hyperstructure is preserved
// // we take the superparent (which is guaranteed to be in the baseTree), find its hyperparent and use that
// augmentedTreeHyperparentsPortal.Set(newSupernodeId, baseTreeHyperparentsPortal.Get(superparentOldSuperId));
// NOTE: This part could potentially be made a separate worklet but it does not seem necessary
// similarly, the super2hypernode should carry over, but it's harder to test because of the attachment points which
// do not have valid old supernode Ids. Instead, we check their superparent's regular global Id against them: if it
// matches, then it must be the start of the superarc, in which case it does have an old Id, and we can then use the
// existing hypernode Id
vtkm::Id superparentOldRegularId = baseTreeSupernodesPortal.Get(superparentOldSuperId);
vtkm::Id superparentGlobalId = baseTreeRegularNodeGlobalIdsPortal.Get(superparentOldRegularId);
// Here: globalRegularIdSetValue is the same as globalRegularIdSetPortal.Get(supernodeSetIndex)
if (superparentGlobalId == globalRegularIdSetValue)
{
// augmentedTreeSuper2hypernodePortal.Set(newSupernodeId, baseTreeSuper2HypernodePortal.Get(superparentOldSuperId));
augmentedTreeSuper2hypernodeValue = baseTreeSuper2HypernodePortal.Get(superparentOldSuperId);
}
else
{
// augmentedTreeSuper2hypernodePortal.Set(newSupernodeId, vtkm::worklet::contourtree_augmented::NO_SUCH_ELEMENT);
augmentedTreeSuper2hypernodeValue = vtkm::worklet::contourtree_augmented::NO_SUCH_ELEMENT;
}
// NOTE: This part has been moved out of the worklet and is performed using standard vtkm copy constructs
// // which round and iteration carry over
// augmentedTreeWhichRoundPortal.Set(newSupernodeId, baseTreeWhichRoundPortal.Get(superparentOldSuperId));
// augmentedTreeWhichIterationPortal.Set(newSupernodeId, baseTreeWhichIterationPortal.Get(superparentOldSuperId));
// now we deal with the regular-sized arrays
// NOTE: This part has been moved out of the worklet and is performed using standard vtkm copy constructs
// // copy the global regular Id and data value
// augmentedTreeRegularNodeGlobalIdsPortal.Set(newRegularId, globalRegularIdSetPortal.Get(supernodeSetIndex));
// augmentedTreeDataValuesPortal.Set(newRegularId, dataValueSetPortal.Get(supernodeSetIndex));
// NOTE: This part has been moved out of the worklet and is performed using standard vtkm copy constructs
// // the sort order will be dealt with later
// // since all of these nodes are supernodes, they will be their own superparent, which means that:
// // a. the regular2node can be set immediately
// augmentedTreeRegular2SupernodePortal.Set(newRegularId, newSupernodeId);
// // b. as can the superparent
// augmentedTreeSuperparentsPortal.Set(newRegularId, newSupernodeId);
// In serial this worklet implements the following operation
/*
for (vtkm::Id supernode = 0; supernode < supernodeSorter.size(); supernode++)
{ // per supernode in the set
// retrieve the index from the sorting index array
vtkm::Id supernodeSetIndex = supernodeSorter[supernode];
// work out the new supernode ID
vtkm::Id newSupernodeID = numSupernodesAlready + supernode;
// At all levels above 0, we used to keep regular vertices in case they are attachment points. After augmentation, we don't need to.
// Instead, at all levels above 0, the regular nodes in each round are identical to the supernodes
// In order to avoid confusion, we will copy the ID into a separate variable
vtkm::Id newRegularID = newSupernodeID;
// setting the supernode's regular ID is now trivial
augmentedTree->supernodes [newSupernodeID] = newRegularID;
// retrieve the ascending flag from the superparent
bool superarcAscends = isAscending(superparentSet[supernodeSetIndex]);
// strip the ascending flag from the superparent
vtkm::Id superparentOldSuperID = maskedIndex(superparentSet[supernodeSetIndex]);
// setting the superarc is done the usual way. Our sort routine has ended up with the supernodes arranged in either ascending or descending order
// inwards along the parent superarc (as expressed by the superparent ID). Each superarc except the last in the segment points to the next one:
// the last one points to the target of the original superarc.
// first test to see if we're the last in the array
if (supernode == supernodeSorter.size() - 1)
{ // last in the array
// special case for root of entire tree at end of top level
if (roundNumber == baseTree->nRounds)
{
augmentedTree->superarcs[newSupernodeID] = NO_SUCH_ELEMENT;
}
else
{ // not the tree root
// retrieve the target of the superarc from the base tree (masking to strip out the ascending flag)
vtkm::Id oldTargetSuperID = maskedIndex(baseTree->superarcs[superparentOldSuperID]);
// convert to a new supernode ID
vtkm::Id newTargetSuperID = newSupernodeIDs[oldTargetSuperID];
// add the ascending flag back in and store in the array
augmentedTree->superarcs[newSupernodeID] = newTargetSuperID | (superarcAscends ? IS_ASCENDING : 0x00);
} // not the tree root
// since there's an extra entry in the firstSupernode array as a sentinel, set it
augmentedTree->firstSupernodePerIteration[roundNumber][augmentedTree->nIterations[roundNumber]] = augmentedTree->supernodes.size();
} // last in the array
else if (superparentOldSuperID != maskedIndex(superparentSet[supernodeSorter[supernode+1]]))
{ // last in the segment
// retrieve the target of the superarc from the base tree (masking to strip out the ascending flag)
vtkm::Id oldTargetSuperID = maskedIndex(baseTree->superarcs[superparentOldSuperID]);
// convert to a new supernode ID
vtkm::Id newTargetSuperID = newSupernodeIDs[oldTargetSuperID];
// add the ascending flag back in and store in the array
augmentedTree->superarcs[newSupernodeID] = newTargetSuperID | (superarcAscends ? IS_ASCENDING : 0x00);
// since we're the last in the segment, we check to see if we are at the end of an iteration
vtkm::Id iterationNumber = maskedIndex(baseTree->whichIteration[superparentOldSuperID]);
vtkm::Id iterationNumberOfNext = maskedIndex(baseTree->whichIteration[superparentOldSuperID + 1]);
if (iterationNumber != iterationNumberOfNext)
{ // boundary of iterations
// If so, we set the "firstSupernodePerIteration" for the next
augmentedTree->firstSupernodePerIteration[roundNumber][iterationNumberOfNext] = newSupernodeID + 1;
} // boundary of iterations
} // last in the segment
else
{ // not last in the segment
// the target is always the next one, so just store it with the ascending flag
augmentedTree->superarcs[newSupernodeID] = (newSupernodeID+1) | (superarcAscends ? IS_ASCENDING : 0x00);
} // not last in the segment
// set the first supernode in the first iteration to the beginning of the round
augmentedTree->firstSupernodePerIteration[roundNumber][0] = numSupernodesAlready;
// setting the hyperparent is straightforward since the hyperstructure is preserved
// we take the superparent (which is guaranteed to be in the baseTree), find it's hyperparent and use that
augmentedTree->hyperparents [newSupernodeID] = baseTree->hyperparents [superparentOldSuperID];
// similarly, the super2hypernode should carry over, but it's harder to test because of the attachment points which
// do not have valid old supernode IDs. Instead, we check their superparent's regular global ID against them: if it
// matches, then it must be the start of the superarc, in which case it does have an old ID, and we can then use the
// existing hypernode ID
vtkm::Id superparentOldRegularID = baseTree->supernodes[superparentOldSuperID];
vtkm::Id superparentGlobalID = baseTree->regularNodeGlobalIDs[superparentOldRegularID];
if (superparentGlobalID == globalRegularIDSet[supernodeSetIndex])
{
augmentedTree->super2hypernode [newSupernodeID] = baseTree->super2hypernode[superparentOldSuperID];
}
else
{
augmentedTree->super2hypernode [newSupernodeID] = NO_SUCH_ELEMENT;
}
// which round and iteration carry over
augmentedTree->whichRound [newSupernodeID] = baseTree->whichRound[superparentOldSuperID];
augmentedTree->whichIteration [newSupernodeID] = baseTree->whichIteration[superparentOldSuperID];
// now we deal with the regular-sized arrays
// copy the global regular ID and data value
augmentedTree->regularNodeGlobalIDs [newRegularID] = globalRegularIDSet[supernodeSetIndex];
augmentedTree->dataValues [newRegularID] = dataValueSet[supernodeSetIndex];
// the sort order will be dealt with later
// since all of these nodes are supernodes, they will be their own superparent, which means that:
// a. the regular2node can be set immediately
augmentedTree->regular2supernode [newRegularID] = newSupernodeID;
// b. as can the superparent
augmentedTree->superparents [newRegularID] = newSupernodeID;
} // per supernode in the set
*/
} // operator()()
private:
const vtkm::Id NumSupernodesAlready;
const vtkm::Id BaseTreeNumRounds;
const vtkm::Id AugmentedTreeNumIterations;
const vtkm::Id RoundNumber;
const vtkm::Id NumAugmentedTreeSupernodes;
}; // CreateSuperarcsWorklet
} // namespace hierarchical_augmenter
} // namespace contourtree_distributed
} // namespace worklet
} // namespace vtkm
#endif
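
A minimal sketch of the superarc encoding this worklet relies on, with illustrative values only: the target supernode Id is OR'd with IS_ASCENDING on write, and MaskedIndex / IsAscending split the two parts back out on read.

#include <vtkm/worklet/contourtree_augmented/Types.h>

inline void SuperarcEncodingSketch()
{
  namespace cta = vtkm::worklet::contourtree_augmented;
  vtkm::Id target = 42; // hypothetical supernode Id of the superarc target
  bool ascends = true;  // whether the superarc ascends
  // encode, as when storing augmentedTree->superarcs
  vtkm::Id superarc = target | (ascends ? cta::IS_ASCENDING : 0x00);
  // decode, as when reading baseTree->superarcs
  vtkm::Id decodedTarget = cta::MaskedIndex(superarc); // == 42
  bool decodedAscends = cta::IsAscending(superarc);    // == true
  (void)decodedTarget;
  (void)decodedAscends;
}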

@ -0,0 +1,210 @@
//============================================================================
// Copyright (c) Kitware, Inc.
// All rights reserved.
// See LICENSE.txt for details.
//
// This software is distributed WITHOUT ANY WARRANTY; without even
// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the above copyright notice for more information.
//============================================================================
// Copyright (c) 2018, The Regents of the University of California, through
// Lawrence Berkeley National Laboratory (subject to receipt of any required approvals
// from the U.S. Dept. of Energy). All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// (1) Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// (2) Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// (3) Neither the name of the University of California, Lawrence Berkeley National
// Laboratory, U.S. Dept. of Energy nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
// IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
//
//=============================================================================
// The PPP2 algorithm and software were jointly developed by
// Hamish Carr (University of Leeds), Gunther H. Weber (LBNL), and
// Oliver Ruebel (LBNL)
//==============================================================================
#ifndef vtk_m_worklet_contourtree_distributed_hierarchical_augmenter_find_superparent_for_necessary_nodes_worklet_h
#define vtk_m_worklet_contourtree_distributed_hierarchical_augmenter_find_superparent_for_necessary_nodes_worklet_h
#include <vtkm/worklet/WorkletMapField.h>
#include <vtkm/worklet/contourtree_augmented/Types.h>
namespace vtkm
{
namespace worklet
{
namespace contourtree_distributed
{
namespace hierarchical_augmenter
{
/// Worklet used in HierarchicalAugmenter::CopyBaseRegularStructure for
/// finding the superparent for each node needed
class FindSuperparentForNecessaryNodesWorklet : public vtkm::worklet::WorkletMapField
{
public:
/// Control signature for the worklet
using ControlSignature = void(
FieldIn baseTreeRegularNodeGlobalIds, // input domain
FieldIn baseTreeSuperparents, // input
FieldIn baseTreeDataValues, // input
WholeArrayIn baseTreeSuperarcs, // input
WholeArrayIn newSupernodeIds, // input
// Execution objects from the AugmentedTree to use the FindRegularByGlobal
// and FindSuperArcForUnknownNode for the hierarchical tree.
ExecObject findRegularByGlobal,
ExecObject findSuperArcForUnknownNode,
// Output arrays to populate
FieldOut regularSuperparents, // output
FieldOut regularNodesNeeded // output
);
using ExecutionSignature = void(InputIndex, _1, _2, _3, _4, _5, _6, _7, _8, _9);
using InputDomain = _1;
/// Default Constructor
VTKM_EXEC_CONT
FindSuperparentForNecessaryNodesWorklet() {}
/// operator() of the worklet
template <typename InFieldPortalType,
typename FieldType,
typename ExecObjectType1,
typename ExecObjectType2>
VTKM_EXEC void operator()(
const vtkm::Id& regularNode, // InputIndex a.k.a. outer loop index
const vtkm::Id& globalRegularId, // same as baseTree->regularNodeGlobalIDs[regularNode];
const vtkm::Id& oldSuperparent, // same as baseTree->superparents[regularNode];
const FieldType& dataValue, // same as baseTree->dataValues[regularNode]
const InFieldPortalType& baseTreeSuperarcsPortal,
const InFieldPortalType& newSupernodeIdsPortal,
const ExecObjectType1& findRegularByGlobal, // Execution object to call FindRegularByGlobal
const ExecObjectType2&
findSuperArcForUnknownNode, // Execution object to call FindSuperArcForUnknownNode
vtkm::Id&
regularSuperparentsValue, // same as regularSuperparents[regularNode] = ... (set on output)
vtkm::Id&
regularNodesNeededValue // same as regularNodesNeeded[regularNode] = ... (set on output)
) const
{
// per regular node
// retrieve the index (globalRegularId set on input)
// first check to see if it is already present (newRegularId set on input)
vtkm::Id newRegularId = findRegularByGlobal.FindRegularByGlobal(globalRegularId);
// if it fails this test, then it's already in tree
if (vtkm::worklet::contourtree_augmented::NoSuchElement(newRegularId))
{ // not yet in tree
// since it's not in the tree, we want to find where it belongs
// to do so, we need to find an "above" and "below" node for it. Since it exists in the old tree, it belongs to a superarc, and we
// can use the ends of the superarc as above and below to do the searching
// oldSuperparent set on Input vtkm::Id oldSuperparent = baseTree->superparents[regularNode];
vtkm::Id oldSuperarc = baseTreeSuperarcsPortal.Get(oldSuperparent);
// break the superarc into the flag and the target
// NOTE that we do not test for NO_SUCH_ELEMENT as all attachment points and the root are guaranteed to be present already,
// and have therefore been excluded by the if statement already
vtkm::Id oldSuperTarget = vtkm::worklet::contourtree_augmented::MaskedIndex(oldSuperarc);
bool ascendingSuperarc = vtkm::worklet::contourtree_augmented::IsAscending(oldSuperarc);
// convert both from and to into new supernode IDs
vtkm::Id newSuperparent = newSupernodeIdsPortal.Get(oldSuperparent);
vtkm::Id newSuperTarget = newSupernodeIdsPortal.Get(oldSuperTarget);
// retrieve the data value (dataValue set on input)
// now test and retrieve, with above = target if ascending, &c.
if (ascendingSuperarc)
{
regularSuperparentsValue = findSuperArcForUnknownNode.FindSuperArcForUnknownNode(
globalRegularId, dataValue, newSuperTarget, newSuperparent);
}
else
{
regularSuperparentsValue = findSuperArcForUnknownNode.FindSuperArcForUnknownNode(
globalRegularId, dataValue, newSuperparent, newSuperTarget);
}
// either way, we set the index array to the index
regularNodesNeededValue = regularNode;
} // not yet in tree
// Set to NO_SUCH_ELEMENT by default. By doing this in the worklet we can avoid having to
// initialize the output arrays first and we can use FieldIn instead of FieldInOut
else
{
regularSuperparentsValue = vtkm::worklet::contourtree_augmented::NO_SUCH_ELEMENT;
regularNodesNeededValue = vtkm::worklet::contourtree_augmented::NO_SUCH_ELEMENT;
}
// In serial this worklet implements the following operation
/*
// now loop, finding the superparent for each node needed
for (vtkm::Id regularNode = 0; regularNode < baseTree->regularNodeGlobalIDs.size(); regularNode++)
{ // per regular node
// retrieve the index
vtkm::Id globalRegularID = baseTree->regularNodeGlobalIDs[regularNode];
// first check to see if it is already present
vtkm::Id newRegularID = augmentedTree->FindRegularByGlobal(globalRegularID);
// if it fails this test, then it's already in tree
if (noSuchElement(newRegularID))
{ // not yet in tree
// std::cout << "Not yet in tree" << std::endl;
// since it's not in the tree, we want to find where it belongs
// to do so, we need to find an "above" and "below" node for it. Since it exists in the old tree, it belongs to a superarc, and we
// can use the ends of the superarc as above and below to do the searching
vtkm::Id oldSuperparent = baseTree->superparents[regularNode];
vtkm::Id oldSuperarc = baseTree->superarcs[oldSuperparent];
// break the superarc into the flag and the target
// NOTE that we do not test for NO_SUCH_ELEMENT as all attachment points and the root are guaranteed to be present already,
// and have therefore been excluded by the if statement already
vtkm::Id oldSuperTarget = maskedIndex(oldSuperarc);
bool ascendingSuperarc = isAscending(oldSuperarc);
// convert both from and to into new supernode IDs
vtkm::Id newSuperparent = newSupernodeIDs[oldSuperparent];
vtkm::Id newSuperTarget = newSupernodeIDs[oldSuperTarget];
// retrieve the data value
dataType dataValue = baseTree->dataValues[regularNode];
// now test and retrieve, with above = target if ascending, &c.
if (ascendingSuperarc)
regularSuperparents[regularNode] = augmentedTree->FindSuperArcForUnknownNode(globalRegularID, dataValue, newSuperTarget, newSuperparent);
else
regularSuperparents[regularNode] = augmentedTree->FindSuperArcForUnknownNode(globalRegularID, dataValue, newSuperparent, newSuperTarget);
// either way, we set the index array to the index
regularNodesNeeded[regularNode] = regularNode;
} // not yet in tree
} // per regular node
*/
} // operator()()
}; // FindSuperparentForNecessaryNodesWorklet
} // namespace hierarchical_augmenter
} // namespace contourtree_distributed
} // namespace worklet
} // namespace vtkm
#endif
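
A minimal invocation sketch for this worklet, assuming the base-tree arrays, the newSupernodeIds lookup, and the two execution objects (exposing FindRegularByGlobal and FindSuperArcForUnknownNode for the augmented tree) have been prepared elsewhere; all variable names below are placeholders.

#include <vtkm/cont/Invoker.h>

// Assumed to exist in the calling scope: baseTreeRegularNodeGlobalIds, baseTreeSuperparents,
// baseTreeDataValues, baseTreeSuperarcs, newSupernodeIds, findRegularByGlobal,
// findSuperArcForUnknownNode, regularSuperparents, regularNodesNeeded.
vtkm::cont::Invoker invoke;
vtkm::worklet::contourtree_distributed::hierarchical_augmenter::
  FindSuperparentForNecessaryNodesWorklet findSuperparentWorklet;
invoke(findSuperparentWorklet,
       baseTreeRegularNodeGlobalIds, // FieldIn, input domain
       baseTreeSuperparents,         // FieldIn
       baseTreeDataValues,           // FieldIn
       baseTreeSuperarcs,            // WholeArrayIn
       newSupernodeIds,              // WholeArrayIn
       findRegularByGlobal,          // ExecObject
       findSuperArcForUnknownNode,   // ExecObject
       regularSuperparents,          // FieldOut
       regularNodesNeeded);          // FieldOut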

@ -0,0 +1,202 @@
//============================================================================
// Copyright (c) Kitware, Inc.
// All rights reserved.
// See LICENSE.txt for details.
//
// This software is distributed WITHOUT ANY WARRANTY; without even
// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the above copyright notice for more information.
//============================================================================
// Copyright (c) 2018, The Regents of the University of California, through
// Lawrence Berkeley National Laboratory (subject to receipt of any required approvals
// from the U.S. Dept. of Energy). All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// (1) Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// (2) Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// (3) Neither the name of the University of California, Lawrence Berkeley National
// Laboratory, U.S. Dept. of Energy nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
// IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
//
//=======================================================================================
//
// Parallel Peak Pruning v. 2.0
//
// Started June 15, 2017
//
// Copyright Hamish Carr, University of Leeds
//
// HierarchicalAugmenter.h
//
//=======================================================================================
#ifndef vtk_m_worklet_contourtree_distributed_hierarchical_augmenter_hierarchical_augmenter_in_out_data_h
#define vtk_m_worklet_contourtree_distributed_hierarchical_augmenter_hierarchical_augmenter_in_out_data_h
#include <iostream> // std::cout
#include <sstream> // std::stringstream
#include <string> // std::string
#include <vtkm/worklet/contourtree_augmented/PrintVectors.h>
#include <vtkm/worklet/contourtree_augmented/Types.h>
namespace vtkm
{
namespace worklet
{
namespace contourtree_distributed
{
namespace hierarchical_augmenter
{
/// Class for storing input or output data for the HierarchicalAugmenter. The data is factored out in this class to
/// allow for modular code and ease reuse, since the input and output require the same types of array parameters
template <typename FieldType>
class HierarchicalAugmenterInOutData
{ // class HierarchicalAugmenter
public:
vtkm::worklet::contourtree_augmented::IdArrayType GlobalRegularIds;
vtkm::cont::ArrayHandle<FieldType> DataValues;
vtkm::worklet::contourtree_augmented::IdArrayType SupernodeIds;
vtkm::worklet::contourtree_augmented::IdArrayType Superparents;
vtkm::worklet::contourtree_augmented::IdArrayType SuperparentRounds;
vtkm::worklet::contourtree_augmented::IdArrayType WhichRounds;
/// empty constructor
HierarchicalAugmenterInOutData() {}
/// main constructor
HierarchicalAugmenterInOutData(
vtkm::worklet::contourtree_augmented::IdArrayType& globalRegularIds,
vtkm::cont::ArrayHandle<FieldType>& dataValues,
vtkm::worklet::contourtree_augmented::IdArrayType& supernodeIds,
vtkm::worklet::contourtree_augmented::IdArrayType& superparents,
vtkm::worklet::contourtree_augmented::IdArrayType& superparentRounds,
vtkm::worklet::contourtree_augmented::IdArrayType& whichRounds)
: GlobalRegularIds(globalRegularIds)
, DataValues(dataValues)
, SupernodeIds(supernodeIds)
, Superparents(superparents)
, SuperparentRounds(superparentRounds)
, WhichRounds(whichRounds)
{
}
/// Destructor
~HierarchicalAugmenterInOutData();
/// Clear all arrays
void ReleaseResources();
/// Print contents of this object
std::string DebugPrint(std::string message, const char* fileName, long lineNum);
}; // class HierarchicalAugmenterInOutData
template <typename FieldType>
HierarchicalAugmenterInOutData<FieldType>::~HierarchicalAugmenterInOutData()
{
this->ReleaseResources();
}
// routine to release memory used for out arrays
template <typename FieldType>
void HierarchicalAugmenterInOutData<FieldType>::ReleaseResources()
{ // ReleaseResources()
this->GlobalRegularIds.ReleaseResources();
this->DataValues.ReleaseResources();
this->SupernodeIds.ReleaseResources();
this->Superparents.ReleaseResources();
this->SuperparentRounds.ReleaseResources();
this->WhichRounds.ReleaseResources();
} // ReleaseResources()
template <typename FieldType>
std::string HierarchicalAugmenterInOutData<FieldType>::DebugPrint(std::string message,
const char* fileName,
long lineNum)
{
// DebugPrint()
std::stringstream resultStream;
resultStream << std::endl;
resultStream << "----------------------------------------" << std::endl;
resultStream << std::setw(30) << std::left << fileName << ":" << std::right << std::setw(4)
<< lineNum << std::endl;
resultStream << message << std::endl;
resultStream << "----------------------------------------" << std::endl;
vtkm::worklet::contourtree_augmented::PrintIndices(
"Global Regular Ids", this->GlobalRegularIds, -1, resultStream);
vtkm::worklet::contourtree_augmented::PrintValues(
"Data Values", this->DataValues, -1, resultStream);
vtkm::worklet::contourtree_augmented::PrintIndices(
"Supernode Ids", this->SupernodeIds, -1, resultStream);
vtkm::worklet::contourtree_augmented::PrintIndices(
"Superparents", this->Superparents, -1, resultStream);
vtkm::worklet::contourtree_augmented::PrintIndices(
"Superparent Rounds", this->SuperparentRounds, -1, resultStream);
vtkm::worklet::contourtree_augmented::PrintIndices(
"Which Rounds", this->WhichRounds, -1, resultStream);
return resultStream.str();
}
} // namespace hierarchical_augmenter
} // namespace contourtree_distributed
} // namespace worklet
} // namespace vtkm
namespace vtkmdiy
{
// Struct to serialize HierarchicalAugmenterInOutData objects (i.e., load/save) needed in parallel for DIY
template <typename FieldType>
struct Serialization<vtkm::worklet::contourtree_distributed::hierarchical_augmenter::
HierarchicalAugmenterInOutData<FieldType>>
{
static void save(vtkmdiy::BinaryBuffer& bb,
const vtkm::worklet::contourtree_distributed::hierarchical_augmenter::
HierarchicalAugmenterInOutData<FieldType>& ha)
{
vtkmdiy::save(bb, ha.GlobalRegularIds);
vtkmdiy::save(bb, ha.DataValues);
vtkmdiy::save(bb, ha.SupernodeIds);
vtkmdiy::save(bb, ha.Superparents);
vtkmdiy::save(bb, ha.SuperparentRounds);
vtkmdiy::save(bb, ha.WhichRounds);
}
static void load(vtkmdiy::BinaryBuffer& bb,
vtkm::worklet::contourtree_distributed::hierarchical_augmenter::
HierarchicalAugmenterInOutData<FieldType>& ha)
{
vtkmdiy::load(bb, ha.GlobalRegularIds);
vtkmdiy::load(bb, ha.DataValues);
vtkmdiy::load(bb, ha.SupernodeIds);
vtkmdiy::load(bb, ha.Superparents);
vtkmdiy::load(bb, ha.SuperparentRounds);
vtkmdiy::load(bb, ha.WhichRounds);
}
};
} // namespace mangled_vtkmdiy_namespace
#endif
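
A minimal round-trip sketch for the Serialization specialization above, assuming VTK-m's bundled DIY exposes MemoryBuffer in the same vtkmdiy namespace used for save/load here; in practice the buffers are managed by DIY's block exchange rather than constructed by hand.

// Assumed: 'outgoing' has been filled by the augmenter on the sending block.
vtkm::worklet::contourtree_distributed::hierarchical_augmenter::
  HierarchicalAugmenterInOutData<vtkm::Float64> outgoing, incoming;
vtkmdiy::MemoryBuffer bb;
vtkmdiy::save(bb, outgoing); // uses Serialization<...>::save defined above
bb.reset();                  // rewind the buffer before reading
vtkmdiy::load(bb, incoming); // uses Serialization<...>::load defined above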

@ -0,0 +1,94 @@
//============================================================================
// Copyright (c) Kitware, Inc.
// All rights reserved.
// See LICENSE.txt for details.
//
// This software is distributed WITHOUT ANY WARRANTY; without even
// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the above copyright notice for more information.
//============================================================================
// Copyright (c) 2018, The Regents of the University of California, through
// Lawrence Berkeley National Laboratory (subject to receipt of any required approvals
// from the U.S. Dept. of Energy). All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// (1) Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// (2) Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// (3) Neither the name of the University of California, Lawrence Berkeley National
// Laboratory, U.S. Dept. of Energy nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
// IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
//
//=============================================================================
// The PPP2 algorithm and software were jointly developed by
// Hamish Carr (University of Leeds), Gunther H. Weber (LBNL), and
// Oliver Ruebel (LBNL)
//==============================================================================
#ifndef vtk_m_worklet_contourtree_distributed_hierarchical_augmenter_is_ascending_decorator_h
#define vtk_m_worklet_contourtree_distributed_hierarchical_augmenter_is_ascending_decorator_h
#include <vtkm/Types.h>
#include <vtkm/worklet/contourtree_augmented/Types.h>
namespace vtkm
{
namespace worklet
{
namespace contourtree_distributed
{
namespace hierarchical_augmenter
{
/// Decorator to add the IS_ASCENDING flag to a superparent if its superarc ascends
class IsAscendingDecorator
{
public:
template <typename PortalType1, typename PortalType2>
struct Functor
{
PortalType1 SuperparentsPortal;
PortalType2 SuperarcsPortal;
VTKM_EXEC_CONT vtkm::Id operator()(vtkm::Id i) const
{
vtkm::Id superparent = this->SuperparentsPortal.Get(i);
if (vtkm::worklet::contourtree_augmented::IsAscending(this->SuperarcsPortal.Get(superparent)))
{
superparent |= vtkm::worklet::contourtree_augmented::IS_ASCENDING;
}
return superparent;
}
};
template <typename PT1, typename PT2>
Functor<PT1, PT2> CreateFunctor(PT1 SuperparentsPortal, PT2 SuperarcsPortal) const
{
return { SuperparentsPortal, SuperarcsPortal };
}
};
} // namespace hierarchical_augmenter
} // namespace contourtree_distributed
} // namespace worklet
} // namespace vtkm
#endif
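
A minimal sketch of how a decorator implementation like this is typically attached to arrays with vtkm::cont::make_ArrayHandleDecorator; 'superparents' and 'superarcs' are placeholder IdArrayType handles.

#include <vtkm/cont/ArrayHandleDecorator.h>

auto superparentsWithFlag = vtkm::cont::make_ArrayHandleDecorator(
  superparents.GetNumberOfValues(),
  vtkm::worklet::contourtree_distributed::hierarchical_augmenter::IsAscendingDecorator{},
  superparents,
  superarcs);
// Reading element i of superparentsWithFlag yields superparents[i] with IS_ASCENDING set
// whenever the superarc of that superparent ascends.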

@ -0,0 +1,143 @@
//============================================================================
// Copyright (c) Kitware, Inc.
// All rights reserved.
// See LICENSE.txt for details.
//
// This software is distributed WITHOUT ANY WARRANTY; without even
// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the above copyright notice for more information.
//============================================================================
// Copyright (c) 2018, The Regents of the University of California, through
// Lawrence Berkeley National Laboratory (subject to receipt of any required approvals
// from the U.S. Dept. of Energy). All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// (1) Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// (2) Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// (3) Neither the name of the University of California, Lawrence Berkeley National
// Laboratory, U.S. Dept. of Energy nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
// IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
//
//=============================================================================
//
// This code is an extension of the algorithm presented in the paper:
// Parallel Peak Pruning for Scalable SMP Contour Tree Computation.
// Hamish Carr, Gunther Weber, Christopher Sewell, and James Ahrens.
// Proceedings of the IEEE Symposium on Large Data Analysis and Visualization
// (LDAV), October 2016, Baltimore, Maryland.
//
// The PPP2 algorithm and software were jointly developed by
// Hamish Carr (University of Leeds), Gunther H. Weber (LBNL), and
// Oliver Ruebel (LBNL)
//==============================================================================
#ifndef vtk_m_worklet_contourtree_distributed_hierarchical_augmenter_is_attachement_point_needed_predicate_h
#define vtk_m_worklet_contourtree_distributed_hierarchical_augmenter_is_attachement_point_needed_predicate_h
#include <vtkm/Types.h>
#include <vtkm/cont/ArrayHandle.h>
#include <vtkm/cont/ExecutionObjectBase.h>
#include <vtkm/worklet/contourtree_augmented/Types.h>
namespace vtkm
{
namespace worklet
{
namespace contourtree_distributed
{
namespace hierarchical_augmenter
{
// Implementation of the predicate used in HierarchicalAugmenter<FieldType>::PrepareOutAttachmentPoints
// to determine whether an attachment point needs to be transferred
class IsAttachementPointNeededPredicateImpl
{
public:
using IdPortalType = vtkm::worklet::contourtree_augmented::IdArrayType::ReadPortalType;
// constructor - takes vectors as parameters
VTKM_CONT
IsAttachementPointNeededPredicateImpl(
const vtkm::worklet::contourtree_augmented::IdArrayType& superparentsRounds,
const vtkm::worklet::contourtree_augmented::IdArrayType& whichRounds,
const vtkm::Id round,
vtkm::cont::DeviceAdapterId device,
vtkm::cont::Token& token)
: SuperparentsRoundsPortal(superparentsRounds.PrepareForInput(device, token))
, WhichRoundsPortal(whichRounds.PrepareForInput(device, token))
, Round(round)
{ // constructor
} // constructor
// () operator - gets called to do comparison
VTKM_EXEC
bool operator()(const vtkm::Id& attachmentPoint) const
{ // operator()
return !((this->SuperparentsRoundsPortal.Get(attachmentPoint) <= this->Round) ||
(this->WhichRoundsPortal.Get(attachmentPoint) > this->Round));
} // operator()
private:
IdPortalType SuperparentsRoundsPortal;
IdPortalType WhichRoundsPortal;
const vtkm::Id Round;
}; // IsAttachementPointNeededPredicateImpl
// Predicate ExecutionObject used in HierarchicalAugmenter<FieldType>::PrepareOutAttachmentPoints
// to determine whether an attachment point needs to be transferred
class IsAttachementPointNeededPredicate : public vtkm::cont::ExecutionObjectBase
{
public:
// constructor - takes vectors as parameters
VTKM_CONT
IsAttachementPointNeededPredicate(
const vtkm::worklet::contourtree_augmented::IdArrayType& superparentsRounds,
const vtkm::worklet::contourtree_augmented::IdArrayType& whichRounds,
const vtkm::Id round)
: SuperparentsRounds(superparentsRounds)
, WhichRounds(whichRounds)
, Round(round)
{
}
VTKM_CONT IsAttachementPointNeededPredicateImpl
PrepareForExecution(vtkm::cont::DeviceAdapterId device, vtkm::cont::Token& token) const
{
return IsAttachementPointNeededPredicateImpl(
this->SuperparentsRounds, this->WhichRounds, this->Round, device, token);
}
private:
vtkm::worklet::contourtree_augmented::IdArrayType SuperparentsRounds;
vtkm::worklet::contourtree_augmented::IdArrayType WhichRounds;
const vtkm::Id Round;
}; // IsAttachementPointNeededPredicate
} // namespace hierarchical_augmenter
} // namespace contourtree_distributed
} // namespace worklet
} // namespace vtkm
#endif
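
A small host-side illustration of the condition this predicate encodes, with made-up values: an attachment point is transferred only when its superparent round lies above the current round and it was introduced no later than the current round.

vtkm::Id superparentRound = 4;
vtkm::Id whichRound = 2;
vtkm::Id round = 3;
// same boolean expression as IsAttachementPointNeededPredicateImpl::operator()
bool needed = !((superparentRound <= round) || (whichRound > round));
// superparentRound(4) > round(3) and whichRound(2) <= round(3), so needed == true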

@ -0,0 +1,141 @@
//============================================================================
// Copyright (c) Kitware, Inc.
// All rights reserved.
// See LICENSE.txt for details.
//
// This software is distributed WITHOUT ANY WARRANTY; without even
// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the above copyright notice for more information.
//============================================================================
// Copyright (c) 2018, The Regents of the University of California, through
// Lawrence Berkeley National Laboratory (subject to receipt of any required approvals
// from the U.S. Dept. of Energy). All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// (1) Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// (2) Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// (3) Neither the name of the University of California, Lawrence Berkeley National
// Laboratory, U.S. Dept. of Energy nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
// IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
//
//=============================================================================
//
// This code is an extension of the algorithm presented in the paper:
// Parallel Peak Pruning for Scalable SMP Contour Tree Computation.
// Hamish Carr, Gunther Weber, Christopher Sewell, and James Ahrens.
// Proceedings of the IEEE Symposium on Large Data Analysis and Visualization
// (LDAV), October 2016, Baltimore, Maryland.
//
// The PPP2 algorithm and software were jointly developed by
// Hamish Carr (University of Leeds), Gunther H. Weber (LBNL), and
// Oliver Ruebel (LBNL)
//==============================================================================
#ifndef vtk_m_worklet_contourtree_distributed_hierarchical_augmenter_is_attachement_point_predicate_h
#define vtk_m_worklet_contourtree_distributed_hierarchical_augmenter_is_attachement_point_predicate_h
#include <vtkm/Types.h>
#include <vtkm/cont/ArrayHandle.h>
#include <vtkm/cont/ExecutionObjectBase.h>
#include <vtkm/worklet/contourtree_augmented/Types.h>
namespace vtkm
{
namespace worklet
{
namespace contourtree_distributed
{
namespace hierarchical_augmenter
{
/// Predicate used in HierarchicalAugmenter<FieldType>::Initialize to determine
/// whether a node is an attachment point
class IsAttachementPointPredicateImpl
{
public:
using IdPortalType = vtkm::worklet::contourtree_augmented::IdArrayType::ReadPortalType;
// constructor - takes vectors as parameters
VTKM_CONT
IsAttachementPointPredicateImpl(
const vtkm::worklet::contourtree_augmented::IdArrayType& superarcs,
const vtkm::worklet::contourtree_augmented::IdArrayType& whichRound,
const vtkm::Id numRounds,
vtkm::cont::DeviceAdapterId device,
vtkm::cont::Token& token)
: SuperarcsPortal(superarcs.PrepareForInput(device, token))
, WhichRoundPortal(whichRound.PrepareForInput(device, token))
, NumRounds(numRounds)
{ // constructor
} // constructor
// () operator - gets called to do comparison
VTKM_EXEC
bool operator()(const vtkm::Id& supernode) const
{ // operator()
return (
vtkm::worklet::contourtree_augmented::NoSuchElement(this->SuperarcsPortal.Get(supernode)) &&
(this->WhichRoundPortal.Get(supernode) < this->NumRounds));
} // operator()
private:
IdPortalType SuperarcsPortal;
IdPortalType WhichRoundPortal;
const vtkm::Id NumRounds;
}; // IsAttachementPointPredicateImpl
class IsAttachementPointPredicate : public vtkm::cont::ExecutionObjectBase
{
public:
// constructor - takes vectors as parameters
VTKM_CONT
IsAttachementPointPredicate(const vtkm::worklet::contourtree_augmented::IdArrayType& superarcs,
const vtkm::worklet::contourtree_augmented::IdArrayType& whichRound,
const vtkm::Id numRounds)
: Superarcs(superarcs)
, WhichRound(whichRound)
, NumRounds(numRounds)
{
}
VTKM_CONT IsAttachementPointPredicateImpl PrepareForExecution(vtkm::cont::DeviceAdapterId device,
vtkm::cont::Token& token) const
{
return IsAttachementPointPredicateImpl(
this->Superarcs, this->WhichRound, this->NumRounds, device, token);
}
private:
vtkm::worklet::contourtree_augmented::IdArrayType Superarcs;
vtkm::worklet::contourtree_augmented::IdArrayType WhichRound;
const vtkm::Id NumRounds;
}; // IsAttachementPointPredicate
} // namespace hierarchical_augmenter
} // namespace contourtree_distributed
} // namespace worklet
} // namespace vtkm
#endif
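
A matching host-side illustration for this predicate, again with made-up values: a supernode counts as an attachment point when no superarc is recorded for it and it does not belong to the top round.

namespace cta = vtkm::worklet::contourtree_augmented;
vtkm::Id superarc = cta::NO_SUCH_ELEMENT; // no superarc recorded for this supernode
vtkm::Id whichRound = 1;
vtkm::Id numRounds = 3;
// same boolean expression as IsAttachementPointPredicateImpl::operator()
bool isAttachmentPoint = cta::NoSuchElement(superarc) && (whichRound < numRounds); // true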

@ -0,0 +1,135 @@
//============================================================================
// Copyright (c) Kitware, Inc.
// All rights reserved.
// See LICENSE.txt for details.
//
// This software is distributed WITHOUT ANY WARRANTY; without even
// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the above copyright notice for more information.
//============================================================================
// Copyright (c) 2018, The Regents of the University of California, through
// Lawrence Berkeley National Laboratory (subject to receipt of any required approvals
// from the U.S. Dept. of Energy). All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// (1) Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// (2) Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// (3) Neither the name of the University of California, Lawrence Berkeley National
// Laboratory, U.S. Dept. of Energy nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
// IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
//
//=============================================================================
// The PPP2 algorithm and software were jointly developed by
// Hamish Carr (University of Leeds), Gunther H. Weber (LBNL), and
// Oliver Ruebel (LBNL)
//==============================================================================
#ifndef vtk_m_worklet_contourtree_distributed_hierarchical_augmenter_resize_arrays_build_new_supernode_ids_worklet_h
#define vtk_m_worklet_contourtree_distributed_hierarchical_augmenter_resize_arrays_build_new_supernode_ids_worklet_h
#include <vtkm/worklet/WorkletMapField.h>
#include <vtkm/worklet/contourtree_augmented/Types.h>
namespace vtkm
{
namespace worklet
{
namespace contourtree_distributed
{
namespace hierarchical_augmenter
{
/// Worklet used in HierarchicalAugmenter<FieldType>::ResizeArrays to build the newSupernodeIds array
class ResizeArraysBuildNewSupernodeIdsWorklet : public vtkm::worklet::WorkletMapField
{
public:
/// Control signature for the worklet
using ControlSignature = void(
FieldIn supernodeIndex, // input domain ArrayHandleIndex(SupernodeSorter.GetNumberOfValues())
FieldIn
supernodeIdSetPermuted, // input supernodeIDSet permuted by supernodeSorter to allow for FieldIn
WholeArrayInOut
newSupernodeIds // output/input (both are necessary since not all values will be overwritten)
);
using ExecutionSignature = void(_1, _2, _3);
using InputDomain = _1;
// Default Constructor
VTKM_EXEC_CONT
ResizeArraysBuildNewSupernodeIdsWorklet(const vtkm::Id& numSupernodesAlready)
: NumSupernodesAlready(numSupernodesAlready)
{
}
template <typename InOutFieldPortalType>
VTKM_EXEC void operator()(
const vtkm::Id& supernode, // InputIndex of supernodeSorter
const vtkm::Id& oldSupernodeId, // same as supernodeIDSet[supernodeSetIndex];
const InOutFieldPortalType& newSupernodeIdsPortal) const
{
// per supernode
// retrieve the index from the sorting index array. supernodeSetIndex set on input
// work out the correct new supernode ID
vtkm::Id newSupernodeId = this->NumSupernodesAlready + supernode;
// retrieve the old supernode ID from the sorting array, remembering
// that if it came from another block it will be set to NO_SUCH_ELEMENT
// vtkm::Id oldSupernodeId set on input since we use ArrayHandlePermutation to
// shuffle supernodeIDSet by supernodeSorter;
// and write to the lookup array
if (!vtkm::worklet::contourtree_augmented::NoSuchElement(oldSupernodeId))
{
newSupernodeIdsPortal.Set(oldSupernodeId, newSupernodeId);
}
// In serial this worklet implements the following operation
/*
for (vtkm::Id supernode = 0; supernode < supernodeSorter.size(); supernode++)
{ // per supernode
// retrieve the index from the sorting index array
vtkm::Id supernodeSetIndex = supernodeSorter[supernode];
// work out the correct new supernode ID
vtkm::Id newSupernodeID = numSupernodesAlready + supernode;
// retrieve the old supernode ID from the sorting array, remembering that if it came from another block it will be set to NO_SUCH_ELEMENT
vtkm::Id oldSupernodeID = supernodeIDSet[supernodeSetIndex];
// and write to the lookup array
if (!noSuchElement(oldSupernodeID))
newSupernodeIDs[oldSupernodeID] = newSupernodeID;
} // per supernode
*/
} // operator()()
private:
const vtkm::Id NumSupernodesAlready;
}; // ResizeArraysBuildNewSupernodeIdsWorklet
} // namespace hierarchical_augmenter
} // namespace contourtree_distributed
} // namespace worklet
} // namespace vtkm
#endif
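
A minimal invocation sketch, assuming supernodeSorter, supernodeIdSet and newSupernodeIds are IdArrayType handles owned by the augmenter; the permutation handle realizes the "supernodeIDSet permuted by supernodeSorter" input described in the control signature.

#include <vtkm/cont/ArrayHandleIndex.h>
#include <vtkm/cont/ArrayHandlePermutation.h>
#include <vtkm/cont/Invoker.h>

vtkm::cont::Invoker invoke;
vtkm::worklet::contourtree_distributed::hierarchical_augmenter::
  ResizeArraysBuildNewSupernodeIdsWorklet buildNewSupernodeIds(numSupernodesAlready);
invoke(buildNewSupernodeIds,
       vtkm::cont::ArrayHandleIndex(supernodeSorter.GetNumberOfValues()),        // supernodeIndex
       vtkm::cont::make_ArrayHandlePermutation(supernodeSorter, supernodeIdSet), // permuted Id set
       newSupernodeIds); // WholeArrayInOut, pre-initialized to NO_SUCH_ELEMENT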

@ -0,0 +1,146 @@
//============================================================================
// Copyright (c) Kitware, Inc.
// All rights reserved.
// See LICENSE.txt for details.
//
// This software is distributed WITHOUT ANY WARRANTY; without even
// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the above copyright notice for more information.
//============================================================================
// Copyright (c) 2018, The Regents of the University of California, through
// Lawrence Berkeley National Laboratory (subject to receipt of any required approvals
// from the U.S. Dept. of Energy). All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// (1) Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// (2) Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// (3) Neither the name of the University of California, Lawrence Berkeley National
// Laboratory, U.S. Dept. of Energy nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
// IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
//
//=============================================================================
// The PPP2 algorithm and software were jointly developed by
// Hamish Carr (University of Leeds), Gunther H. Weber (LBNL), and
// Oliver Ruebel (LBNL)
//==============================================================================
#ifndef vtk_m_worklet_contourtree_distributed_hierarchical_augmenter_set_first_attachment_point_in_round_worklet_h
#define vtk_m_worklet_contourtree_distributed_hierarchical_augmenter_set_first_attachment_point_in_round_worklet_h
#include <vtkm/worklet/WorkletMapField.h>
#include <vtkm/worklet/contourtree_augmented/Types.h>
namespace vtkm
{
namespace worklet
{
namespace contourtree_distributed
{
namespace hierarchical_augmenter
{
/// Worklet used in HierarchicalAugmenter to set the first attachment point in each round, i.e.,
/// the index at which each superparent-round segment starts in the sorted attachment-point array
class SetFirstAttachmentPointInRoundWorklet : public vtkm::worklet::WorkletMapField
{
public:
/// Control signature for the worklet
/// NOTE: we need this to be in/out because any values we don't set here need
/// to remain NO_SUCH_ELEMENT for further processing
using ControlSignature = void(WholeArrayIn attachmentIds, // input
WholeArrayIn superparentRounds, // input
WholeArrayInOut firstAttachmentPointInRound // input/output
);
using ExecutionSignature = void(InputIndex, _1, _2, _3);
using InputDomain = _1;
// Default Constructor
VTKM_EXEC_CONT
SetFirstAttachmentPointInRoundWorklet() {}
template <typename InFieldPortalType, typename InOutFieldPortalType>
VTKM_EXEC void operator()(const vtkm::Id& attachmentPoint,
const InFieldPortalType& attachmentIdsPortal,
const InFieldPortalType& superparentRoundsPortal,
const InOutFieldPortalType& firstAttachmentPointInRoundPortal) const
{
// per attachment point
// retrieve the ID of the attachment point
vtkm::Id attachmentPointId = attachmentIdsPortal.Get(attachmentPoint);
// the 0th element always starts a segment, so set the round's beginning
if (attachmentPoint == 0)
{
firstAttachmentPointInRoundPortal.Set(superparentRoundsPortal.Get(attachmentPointId),
static_cast<vtkm::Id>(0));
}
// otherwise, a segment begins when its superparent round is different from the next one down
else
{ // not the beginning of the array
// retrieve the two attachment point IDs
vtkm::Id previousAttachmentPointId = attachmentIdsPortal.Get(attachmentPoint - 1);
// and the corresponding superparent rounds
vtkm::Id superparentRound = superparentRoundsPortal.Get(attachmentPointId);
vtkm::Id previousSuperparentRound = superparentRoundsPortal.Get(previousAttachmentPointId);
// detect where the segment ID changes & use that to set the value
if (superparentRound != previousSuperparentRound)
{
firstAttachmentPointInRoundPortal.Set(superparentRound, attachmentPoint);
}
} // not the beginning of the array
// In serial this worklet implements the following operation
/*
for (vtkm::Id attachmentPoint = 0; attachmentPoint < attachmentIDs.size(); attachmentPoint++)
{ // per attachment point
// retrieve the ID of the attachment point
vtkm::Id attachmentPointID = attachmentIDs[attachmentPoint];
// the 0th element always starts a segment, so set the round's beginning
if (attachmentPoint == 0)
firstAttachmentPointInRound[superparentRounds[attachmentPointID]] = 0;
// otherwise, a segment begins when it's SP round is different from the next one down
else
{ // not the beginning of the array
// retrieve the two attachment point IDs
vtkm::Id previousAttachmentPointID = attachmentIDs[attachmentPoint-1];
// and the corresponding superparent rounds
vtkm::Id superparentRound = superparentRounds[attachmentPointID];
vtkm::Id previousSuperparentRound = superparentRounds[previousAttachmentPointID];
// detect where the segment ID changes & use that to set the value
if (superparentRound != previousSuperparentRound)
firstAttachmentPointInRound[superparentRound] = attachmentPoint;
} // not the beginning of the array
} // per attachment point
*/
} // operator()()
}; // SetFirstAttachmentPointInRoundWorklet
} // namespace hierarchical_augmenter
} // namespace contourtree_distributed
} // namespace worklet
} // namespace vtkm
#endif
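
A minimal invocation sketch, assuming attachmentIds has already been sorted so that attachment points with the same superparent round are contiguous, and firstAttachmentPointInRound has been initialized to NO_SUCH_ELEMENT.

#include <vtkm/cont/Invoker.h>

vtkm::cont::Invoker invoke;
vtkm::worklet::contourtree_distributed::hierarchical_augmenter::
  SetFirstAttachmentPointInRoundWorklet setFirstAttachmentPointInRound;
invoke(setFirstAttachmentPointInRound,
       attachmentIds,                // WholeArrayIn, also the input domain
       superparentRounds,            // WholeArrayIn
       firstAttachmentPointInRound); // WholeArrayInOut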

@ -0,0 +1,94 @@
//============================================================================
// Copyright (c) Kitware, Inc.
// All rights reserved.
// See LICENSE.txt for details.
//
// This software is distributed WITHOUT ANY WARRANTY; without even
// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the above copyright notice for more information.
//============================================================================
// Copyright (c) 2018, The Regents of the University of California, through
// Lawrence Berkeley National Laboratory (subject to receipt of any required approvals
// from the U.S. Dept. of Energy). All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// (1) Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// (2) Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// (3) Neither the name of the University of California, Lawrence Berkeley National
// Laboratory, U.S. Dept. of Energy nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
// IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
//
//=============================================================================
// The PPP2 algorithm and software were jointly developed by
// Hamish Carr (University of Leeds), Gunther H. Weber (LBNL), and
// Oliver Ruebel (LBNL)
//==============================================================================
#ifndef vtk_m_worklet_contourtree_distributed_hierarchical_augmenter_set_superparent_set_decorator_h
#define vtk_m_worklet_contourtree_distributed_hierarchical_augmenter_set_superparent_set_decorator_h
#include <vtkm/Types.h>
#include <vtkm/worklet/contourtree_augmented/Types.h>
namespace vtkm
{
namespace worklet
{
namespace contourtree_distributed
{
namespace hierarchical_augmenter
{
/// Decorator used when building the superparent set: looks up the old supernode Id of each
/// kept supernode and adds the IS_ASCENDING flag when its base-tree superarc ascends
class SetSuperparentSetDecorator
{
public:
template <typename PortalType1, typename PortalType2>
struct Functor
{
PortalType1 KeptSupernodesPortal;
PortalType2 BaseTreeSuperarcsPortal;
VTKM_EXEC_CONT vtkm::Id operator()(vtkm::Id i) const
{
vtkm::Id oldSupernodeId = KeptSupernodesPortal.Get(i);
return (oldSupernodeId |
(vtkm::worklet::contourtree_augmented::IsAscending(
BaseTreeSuperarcsPortal.Get(oldSupernodeId))
? vtkm::worklet::contourtree_augmented::IS_ASCENDING
: 0x00));
}
};
template <typename PT1, typename PT2>
Functor<PT1, PT2> CreateFunctor(PT1 KeptSupernodesPortal, PT2 BaseTreeSuperarcsPortal) const
{
return { KeptSupernodesPortal, BaseTreeSuperarcsPortal };
}
};
} // namespace hierarchical_augmenter
} // namespace contourtree_distributed
} // namespace worklet
} // namespace vtkm
#endif
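
This decorator is attached the same way as in the IsAscendingDecorator sketch above; 'keptSupernodes' and 'baseTreeSuperarcs' are placeholder IdArrayType handles.

auto superparentSet = vtkm::cont::make_ArrayHandleDecorator(
  keptSupernodes.GetNumberOfValues(),
  vtkm::worklet::contourtree_distributed::hierarchical_augmenter::SetSuperparentSetDecorator{},
  keptSupernodes,
  baseTreeSuperarcs);
// Element i is the old supernode Id of kept supernode i, with IS_ASCENDING set when the
// corresponding base-tree superarc ascends.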

@ -0,0 +1,153 @@
//============================================================================
// Copyright (c) Kitware, Inc.
// All rights reserved.
// See LICENSE.txt for details.
//
// This software is distributed WITHOUT ANY WARRANTY; without even
// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the above copyright notice for more information.
//============================================================================
// Copyright (c) 2018, The Regents of the University of California, through
// Lawrence Berkeley National Laboratory (subject to receipt of any required approvals
// from the U.S. Dept. of Energy). All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// (1) Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// (2) Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// (3) Neither the name of the University of California, Lawrence Berkeley National
// Laboratory, U.S. Dept. of Energy nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
// IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
//
//=============================================================================
// The PPP2 algorithm and software were jointly developed by
// Hamish Carr (University of Leeds), Gunther H. Weber (LBNL), and
// Oliver Ruebel (LBNL)
//==============================================================================
#ifndef vtk_m_worklet_contourtree_distributed_hierarchical_augmenter_update_hyperstructure_set_hyperarcs_and_nodes_worklet_h
#define vtk_m_worklet_contourtree_distributed_hierarchical_augmenter_update_hyperstructure_set_hyperarcs_and_nodes_worklet_h
#include <vtkm/worklet/WorkletMapField.h>
#include <vtkm/worklet/contourtree_augmented/Types.h>
namespace vtkm
{
namespace worklet
{
namespace contourtree_distributed
{
namespace hierarchical_augmenter
{
/// Worklet used in HierarchicalAugmenter::UpdateHyperstructure to set the hyperarcs and hypernodes
class UpdateHyperstructureSetHyperarcsAndNodesWorklet : public vtkm::worklet::WorkletMapField
{
public:
/// Control signature for the worklet
using ControlSignature = void(FieldIn baseTreeHypernodes, // input
FieldIn baseTreeHyperarcs, // input
WholeArrayIn newSupernodeIds, // input
FieldOut augmentedTreeHypernodes, // output
FieldOut augmentedTreeHyperarcs // output
);
using ExecutionSignature = void(_1, _2, _3, _4, _5);
using InputDomain = _1;
// Default Constructor
VTKM_EXEC_CONT
UpdateHyperstructureSetHyperarcsAndNodesWorklet() {}
template <typename InFieldPortalType>
VTKM_EXEC void operator()(
const vtkm::Id& oldHypernodeSuperId, // same as baseTree->hypernodes[hypernode]
const vtkm::Id& oldTargetSuperIdMasked, // same as baseTree->hyperarcs[hypernode]
const InFieldPortalType& newSupernodeIdsPortal,
vtkm::Id& outAugmentedTreeHypernodesValue, // same as augmentedTree->hypernodes[hypernode] = ...
vtkm::Id& outAugmentedTreeHyperarcsValue // same as augmentedTree->hyperarcs[hypernode] = ...
) const
{
// per hypernode
// retrieve existing values which are in old supernode Ids
// oldHypernodeSuperId and oldTargetSuperIdMasked are set by the worklet
// strip out the ascending flag & the flag for root hyperarc
bool isRootHyperarc =
vtkm::worklet::contourtree_augmented::NoSuchElement(oldTargetSuperIdMasked);
bool hyperarcAscends =
vtkm::worklet::contourtree_augmented::IsAscending(oldTargetSuperIdMasked);
vtkm::Id oldTargetSuperId =
vtkm::worklet::contourtree_augmented::MaskedIndex(oldTargetSuperIdMasked);
// lookup new values
vtkm::Id newHypernodeSuperId = newSupernodeIdsPortal.Get(oldHypernodeSuperId);
vtkm::Id newTargetSuperId = vtkm::worklet::contourtree_augmented::NO_SUCH_ELEMENT;
if (!isRootHyperarc)
{ // not the root
// lookup the new ID
newTargetSuperId = newSupernodeIdsPortal.Get(oldTargetSuperId);
if (hyperarcAscends)
{
newTargetSuperId |= vtkm::worklet::contourtree_augmented::IS_ASCENDING;
}
} // not the root
// now store them
outAugmentedTreeHypernodesValue = newHypernodeSuperId;
outAugmentedTreeHyperarcsValue = newTargetSuperId;
// In serial this worklet implements the following operation
/*
for (vtkm::Id hypernode = 0; hypernode < augmentedTree->hypernodes.size(); hypernode++)
{ // per hypernode
// retrieve existing values which are in old supernode IDs
vtkm::Id oldHypernodeSuperID = baseTree->hypernodes[hypernode];
vtkm::Id oldTargetSuperID = baseTree->hyperarcs[hypernode];
// strip out the ascending flag & the flag for root hyperarc
bool isRootHyperarc = noSuchElement(oldTargetSuperID);
bool hyperarcAscends = isAscending(oldTargetSuperID);
oldTargetSuperID = maskedIndex(oldTargetSuperID);
// lookup new values
vtkm::Id newHypernodeSuperID = newSupernodeIDs[oldHypernodeSuperID];
vtkm::Id newTargetSuperID = NO_SUCH_ELEMENT;
if (!isRootHyperarc)
{ // not the root
// lookup the new ID
newTargetSuperID = newSupernodeIDs[oldTargetSuperID];
if (hyperarcAscends)
newTargetSuperID |= IS_ASCENDING;
} // not the root
// now store them
augmentedTree->hypernodes[hypernode] = newHypernodeSuperID;
augmentedTree->hyperarcs[hypernode] = newTargetSuperID;
} // per hypernode
*/
} // operator()()
}; // UpdateHyperstructureSetHyperarcsAndNodesWorklet
} // namespace hierarchical_augmenter
} // namespace contourtree_distributed
} // namespace worklet
} // namespace vtkm
#endif
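
A minimal invocation sketch, assuming newSupernodeIds has already been built (for instance via ResizeArraysBuildNewSupernodeIdsWorklet above) and the base-tree hypernode and hyperarc arrays are available; the variable names are placeholders.

#include <vtkm/cont/Invoker.h>

vtkm::cont::Invoker invoke;
vtkm::worklet::contourtree_distributed::hierarchical_augmenter::
  UpdateHyperstructureSetHyperarcsAndNodesWorklet updateHyperstructure;
invoke(updateHyperstructure,
       baseTreeHypernodes,      // FieldIn, input domain
       baseTreeHyperarcs,       // FieldIn
       newSupernodeIds,         // WholeArrayIn
       augmentedTreeHypernodes, // FieldOut
       augmentedTreeHyperarcs); // FieldOut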

@ -0,0 +1,135 @@
//============================================================================
// Copyright (c) Kitware, Inc.
// All rights reserved.
// See LICENSE.txt for details.
//
// This software is distributed WITHOUT ANY WARRANTY; without even
// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the above copyright notice for more information.
//============================================================================
// Copyright (c) 2018, The Regents of the University of California, through
// Lawrence Berkeley National Laboratory (subject to receipt of any required approvals
// from the U.S. Dept. of Energy). All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// (1) Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// (2) Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// (3) Neither the name of the University of California, Lawrence Berkeley National
// Laboratory, U.S. Dept. of Energy nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
// IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
//
//=============================================================================
// The PPP2 algorithm and software were jointly developed by
// Hamish Carr (University of Leeds), Gunther H. Weber (LBNL), and
// Oliver Ruebel (LBNL)
//==============================================================================
#ifndef vtk_m_worklet_contourtree_distributed_hierarchical_augmenter_update_hyperstructure_set_superchildren_worklet_h
#define vtk_m_worklet_contourtree_distributed_hierarchical_augmenter_update_hyperstructure_set_superchildren_worklet_h
#include <vtkm/worklet/WorkletMapField.h>
#include <vtkm/worklet/contourtree_augmented/Types.h>
namespace vtkm
{
namespace worklet
{
namespace contourtree_distributed
{
namespace hierarchical_augmenter
{
/// Worklet used in HierarchicalAugmenter::UpdateHyperstructure to set the superchildren
/// The worklet finds the number of superchildren as the delta between the super Id
/// and the next hypernode's super Id
class UpdateHyperstructureSetSuperchildrenWorklet : public vtkm::worklet::WorkletMapField
{
public:
/// Control signature for the worklet
using ControlSignature = void(
WholeArrayIn augmentedTreeHypernodes, // input (we need both this and the next value)
FieldOut augmentedTreeSuperchildren // output
);
using ExecutionSignature = void(InputIndex, _1, _2);
using InputDomain = _1;
// Default Constructor
VTKM_EXEC_CONT
UpdateHyperstructureSetSuperchildrenWorklet(const vtkm::Id& augmentedTreeNumSupernodes)
: AugmentedTreeNumSupernodes(augmentedTreeNumSupernodes)
{
}
template <typename InFieldPortalType>
VTKM_EXEC void operator()(
const vtkm::Id& hypernode,
const InFieldPortalType& augmentedTreeHypernodesPortal,
vtkm::Id&
augmentedTreeSuperchildrenValue // same as augmentedTree->superchildren[InputIndex] = ...
) const
{
// per hypernode
// retrieve the new superId
vtkm::Id superId = augmentedTreeHypernodesPortal.Get(hypernode);
// and the next one over
vtkm::Id nextSuperId;
if (hypernode == augmentedTreeHypernodesPortal.GetNumberOfValues() - 1)
{
nextSuperId = this->AugmentedTreeNumSupernodes;
}
else
{
nextSuperId = augmentedTreeHypernodesPortal.Get(hypernode + 1);
}
// the difference is the number of superchildren
augmentedTreeSuperchildrenValue = nextSuperId - superId;
// In serial this worklet implements the following operation
/*
for (vtkm::Id hypernode = 0; hypernode < augmentedTree->hypernodes.size(); hypernode++)
{ // per hypernode
// retrieve the new super ID
vtkm::Id superID = augmentedTree->hypernodes[hypernode];
// and the next one over
vtkm::Id nextSuperID;
if (hypernode == augmentedTree->hypernodes.size() - 1)
nextSuperID = augmentedTree->supernodes.size();
else
nextSuperID = augmentedTree->hypernodes[hypernode+1];
// the difference is the number of superchildren
augmentedTree->superchildren[hypernode] = nextSuperID - superID;
} // per hypernode
*/
} // operator()()
private:
const vtkm::Id AugmentedTreeNumSupernodes;
}; // UpdateHyperstructureSetSuperchildrenWorklet
} // namespace hierarchical_augmenter
} // namespace contourtree_distributed
} // namespace worklet
} // namespace vtkm
#endif
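
A minimal sketch of how this worklet might be dispatched with a vtkm::cont::Invoker; the array and function names are placeholders, and the worklet header added above is assumed to be included.

#include <vtkm/cont/Invoker.h>
#include <vtkm/worklet/contourtree_augmented/Types.h>
// ... plus the UpdateHyperstructureSetSuperchildrenWorklet.h header added above

namespace ha = vtkm::worklet::contourtree_distributed::hierarchical_augmenter;
using vtkm::worklet::contourtree_augmented::IdArrayType;

// One output value per hypernode: the delta between consecutive hypernode super Ids.
void SetSuperchildren(const IdArrayType& augmentedTreeHypernodes,
                      vtkm::Id augmentedTreeNumSupernodes,
                      IdArrayType& augmentedTreeSuperchildren)
{
  vtkm::cont::Invoker invoke;
  ha::UpdateHyperstructureSetSuperchildrenWorklet worklet(augmentedTreeNumSupernodes);
  invoke(worklet, augmentedTreeHypernodes, augmentedTreeSuperchildren);
}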

@ -0,0 +1,22 @@
##============================================================================
## Copyright (c) Kitware, Inc.
## All rights reserved.
## See LICENSE.txt for details.
##
## This software is distributed WITHOUT ANY WARRANTY; without even
## the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
## PURPOSE. See the above copyright notice for more information.
##============================================================================
set(headers
InitializeIntrinsicVertexCountComputeSuperparentIdsWorklet.h
InitializeIntrinsicVertexCountInitalizeCountsWorklet.h
InitializeIntrinsicVertexCountSubtractLowEndWorklet.h
ComputeSuperarcDependentWeightsWorklet.h
ComputeSuperarcTransferWeightsWorklet.h
TransferTargetComperator.h
TransferWeightsUpdateRHEWorklet.h
TransferWeightsUpdateLHEWorklet.h
)
vtkm_declare_headers(${headers})

@ -0,0 +1,198 @@
//============================================================================
// Copyright (c) Kitware, Inc.
// All rights reserved.
// See LICENSE.txt for details.
//
// This software is distributed WITHOUT ANY WARRANTY; without even
// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the above copyright notice for more information.
//============================================================================
// Copyright (c) 2018, The Regents of the University of California, through
// Lawrence Berkeley National Laboratory (subject to receipt of any required approvals
// from the U.S. Dept. of Energy). All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// (1) Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// (2) Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// (3) Neither the name of the University of California, Lawrence Berkeley National
// Laboratory, U.S. Dept. of Energy nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
// IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
//
//=============================================================================
//
// The PPP2 algorithm and software were jointly developed by
// Hamish Carr (University of Leeds), Gunther H. Weber (LBNL), and
// Oliver Ruebel (LBNL)
//==============================================================================
#ifndef vtk_m_worklet_contourtree_distributed_hierarchical_hyper_sweeper_compute_superarc_dependent_weights_worklet_h
#define vtk_m_worklet_contourtree_distributed_hierarchical_hyper_sweeper_compute_superarc_dependent_weights_worklet_h
#include <vtkm/worklet/WorkletMapField.h>
#include <vtkm/worklet/contourtree_augmented/Types.h>
namespace vtkm
{
namespace worklet
{
namespace contourtree_distributed
{
namespace hierarchical_hyper_sweeper
{
/// Worklet used in HierarchicalHyperSweeper.ComputeSuperarcDependentWeights(...) to
/// compute the superarc dependent weights
template <typename FieldType>
class ComputeSuperarcDependentWeightsWorklet : public vtkm::worklet::WorkletMapField
{
public:
using ControlSignature = void(
FieldIn supernodeIndex, // counting index [firstSupernode, lastSupernode)
FieldIn
hierarchicalTreeSuperarcsView, // view of hierarchicalTree.Superarcs[firstSupernode, lastSupernode)
FieldIn
hierarchicalTreeHyperparentsView, // view of hierarchicalTree.Hyperparents[firstSupernode, lastSupernode)
WholeArrayIn hierarchicalTreeHypernodes, // whole hierarchicalTree.Hypernodes array
WholeArrayIn valuePrefixSum, // whole valuePrefixSum array
FieldInOut dependentValuesView // output view of dependentValues[firstSupernode, lastSupernode)
);
using ExecutionSignature = void(_1, _2, _3, _4, _5, _6);
// Default Constructor
VTKM_EXEC_CONT
ComputeSuperarcDependentWeightsWorklet(const vtkm::Id& firstSupernode,
const vtkm::Id& round,
const vtkm::Id& hierarchicalTreeNumRounds)
: FirstSupernode(firstSupernode)
, Round(round)
, HierarchicalTreeNumRounds(hierarchicalTreeNumRounds)
{
}
template <typename InFieldPortalType>
VTKM_EXEC void operator()(
const vtkm::Id& supernode,
const vtkm::Id& superarcTo, // same as hierarchicalTree.superarcs[supernode];
const vtkm::Id& hyperparent, // same as hierarchicalTree.hyperparents[supernode];
const InFieldPortalType& hierarchicalTreeHypernodesPortal,
const InFieldPortalType& valuePrefixSumPortal,
vtkm::Id& dependentValue) const
{
// per supernode
// if there is no superarc, it is either the root of the tree or an attachment point
if (vtkm::worklet::contourtree_augmented::NoSuchElement(superarcTo))
{ // null superarc
// next we test for whether it is the global root
if (this->Round == this->HierarchicalTreeNumRounds)
{ // global root
// this is guaranteed to be the only element in its iteration
// so the prefix sum is good as it stands
dependentValue = valuePrefixSumPortal.Get(supernode);
} // global root
else
{ // attachment point
// could be the first in the iteration, in which case it is correct
if (supernode == this->FirstSupernode)
{
dependentValue = valuePrefixSumPortal.Get(supernode);
}
// otherwise, we are guaranteed that it's a length one chain, so subtract predecessor
else
{
dependentValue =
valuePrefixSumPortal.Get(supernode) - valuePrefixSumPortal.Get(supernode - 1);
}
} // attachment point
} // null superarc
else
{ // actual superarc
// use the hyperparent to find the hypernode at the beginning of the chain
vtkm::Id hyperparentSuperId = hierarchicalTreeHypernodesPortal.Get(hyperparent);
// now we check to see which value we subtract
FieldType baseValue = 0;
if (hyperparentSuperId != this->FirstSupernode)
{
baseValue = valuePrefixSumPortal.Get(hyperparentSuperId - 1);
}
// for all others, remove the hyperparent's prefix sum to get the "relative" prefix sum
dependentValue = valuePrefixSumPortal.Get(supernode) - baseValue;
} // actual superarc
// In serial this worklet implements the following operation
/*
for (vtkm::Id supernode = firstSupernode; supernode < lastSupernode; supernode++)
{ // per supernode
// we need to know the superarc first
vtkm::Id superarcTo = hierarchicalTree.superarcs[supernode];
// if there is no superarc, it is either the root of the tree or an attachment point
if (noSuchElement(superarcTo))
{ // null superarc
// next we test for whether it is the global root
if (round == hierarchicalTree.nRounds)
{ // global root
// this is guaranteed to be the only element in its iteration
// so the prefix sum is good as it stands
dependentValues[supernode] = valuePrefixSum[supernode];
} // global root
else
{ // attachment point
// could be the first in the iteration, in which case it is correct
if (supernode == firstSupernode)
dependentValues[supernode] = valuePrefixSum[supernode];
// otherwise, we are guaranteed that it's a length one chain, so subtract predecessor
else
dependentValues[supernode] = valuePrefixSum[supernode] - valuePrefixSum[supernode-1];
} // attachment point
} // null superarc
else
{ // actual superarc
// use the hyperparent to find the hypernode at the beginning of the chain
vtkm::Id hyperparent = hierarchicalTree.hyperparents[supernode];
vtkm::Id hyperparentSuperId = hierarchicalTree.hypernodes[hyperparent];
// now we check to see which value we subtract
dataType baseValue = 0;
if (hyperparentSuperId != firstSupernode)
baseValue = valuePrefixSum[hyperparentSuperId - 1];
// for all others, remove the hyperparent's prefix sum to get the "relative" prefix sum
dependentValues[supernode] = valuePrefixSum[supernode] - baseValue;
} // actual superarc
} // per supernode
*/
} // operator()()
private:
const vtkm::Id FirstSupernode;
const vtkm::Id Round;
const vtkm::Id HierarchicalTreeNumRounds;
}; // ComputeSuperarcDependentWeightsWorklet
} // namespace hierarchical_hyper_sweeper
} // namespace contourtree_distributed
} // namespace worklet
} // namespace vtkm
#endif
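
The worklet operates on one slab [firstSupernode, lastSupernode) of a hyper sweep iteration. Below is a sketch of a possible dispatch, using a counting array for the supernode index and array views for the per-supernode fields; FieldType is taken as vtkm::Id (e.g., for vertex counts), and all variable names are assumptions rather than the actual HierarchicalHyperSweeper members.

#include <vtkm/cont/ArrayHandleCounting.h>
#include <vtkm/cont/ArrayHandleView.h>
#include <vtkm/cont/Invoker.h>
#include <vtkm/worklet/contourtree_augmented/Types.h>
// ... plus the ComputeSuperarcDependentWeightsWorklet.h header added above

namespace hhs = vtkm::worklet::contourtree_distributed::hierarchical_hyper_sweeper;
using vtkm::worklet::contourtree_augmented::IdArrayType;

void ComputeDependentWeights(vtkm::Id firstSupernode, vtkm::Id lastSupernode,
                             vtkm::Id round, vtkm::Id numRounds,
                             const IdArrayType& treeSuperarcs,
                             const IdArrayType& treeHyperparents,
                             const IdArrayType& treeHypernodes,
                             const IdArrayType& valuePrefixSum,
                             IdArrayType& dependentValues)
{
  vtkm::Id numToProcess = lastSupernode - firstSupernode;
  vtkm::cont::ArrayHandleCounting<vtkm::Id> supernodeIndex(firstSupernode, 1, numToProcess);
  auto superarcsView = vtkm::cont::make_ArrayHandleView(treeSuperarcs, firstSupernode, numToProcess);
  auto hyperparentsView = vtkm::cont::make_ArrayHandleView(treeHyperparents, firstSupernode, numToProcess);
  auto dependentValuesView = vtkm::cont::make_ArrayHandleView(dependentValues, firstSupernode, numToProcess);
  vtkm::cont::Invoker invoke;
  hhs::ComputeSuperarcDependentWeightsWorklet<vtkm::Id> worklet(firstSupernode, round, numRounds);
  invoke(worklet, supernodeIndex, superarcsView, hyperparentsView,
         treeHypernodes, valuePrefixSum, dependentValuesView);
}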

@ -0,0 +1,193 @@
//============================================================================
// Copyright (c) Kitware, Inc.
// All rights reserved.
// See LICENSE.txt for details.
//
// This software is distributed WITHOUT ANY WARRANTY; without even
// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the above copyright notice for more information.
//============================================================================
// Copyright (c) 2018, The Regents of the University of California, through
// Lawrence Berkeley National Laboratory (subject to receipt of any required approvals
// from the U.S. Dept. of Energy). All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// (1) Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// (2) Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// (3) Neither the name of the University of California, Lawrence Berkeley National
// Laboratory, U.S. Dept. of Energy nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
// IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
//
//=============================================================================
//
// This code is an extension of the algorithm presented in the paper:
// Parallel Peak Pruning for Scalable SMP Contour Tree Computation.
// Hamish Carr, Gunther Weber, Christopher Sewell, and James Ahrens.
// Proceedings of the IEEE Symposium on Large Data Analysis and Visualization
// (LDAV), October 2016, Baltimore, Maryland.
//
// The PPP2 algorithm and software were jointly developed by
// Hamish Carr (University of Leeds), Gunther H. Weber (LBNL), and
// Oliver Ruebel (LBNL)
//==============================================================================
#ifndef vtk_m_worklet_contourtree_distributed_hierarchical_hyper_sweeper_compute_superarc_transfer_weights_worklet_h
#define vtk_m_worklet_contourtree_distributed_hierarchical_hyper_sweeper_compute_superarc_transfer_weights_worklet_h
#include <vtkm/worklet/WorkletMapField.h>
#include <vtkm/worklet/contourtree_augmented/Types.h>
namespace vtkm
{
namespace worklet
{
namespace contourtree_distributed
{
namespace hierarchical_hyper_sweeper
{
/// Worklet used in HierarchicalHyperSweeper.ComputeSuperarcTransferWeights(...) to
/// determine the transfer target for each superarc, i.e., where its weight is transferred to
class ComputeSuperarcTransferWeightsWorklet : public vtkm::worklet::WorkletMapField
{
public:
using ControlSignature = void(
FieldIn supernodeIndex, // counting array [firstSupernode, lastSupernode)
FieldIn
hierarchicalTreeSupernodesView, // view of hierarchicalTree.supernodes[firstSupernode, lastSupernode)
WholeArrayIn hierarchicalTreeSuperparents, // whole array of hierarchicalTree.superparents
WholeArrayIn hierarchicalTreeHyperparents, // whole array of hierarchicalTree.hyperparents
FieldIn
hierarchicalTreeSuperarcsView, // view of hierarchicalTree.superarcs[firstSupernode, lastSupernode)
FieldOut transferTargetView // view of transferTarget[firstSupernode, lastSupernode)
);
using ExecutionSignature = void(_1, _2, _3, _4, _5, _6);
// Default Constructor
VTKM_EXEC_CONT
ComputeSuperarcTransferWeightsWorklet(const vtkm::Id& round,
const vtkm::Id& hierarchicalTreeNumRounds,
const vtkm::Id& lastSupernode)
: Round(round)
, HierarchicalTreeNumRounds(hierarchicalTreeNumRounds)
, LastSupernode(lastSupernode)
{
}
template <typename InFieldPortalType>
VTKM_EXEC void operator()(
const vtkm::Id& supernode,
const vtkm::Id& supernodeRegularId, // same as hierarchicalTree.supernodes[supernode];
const InFieldPortalType& hierarchicalTreeSuperparentsPortal,
const InFieldPortalType& hierarchicalTreeHyperparentsPortal,
const vtkm::Id& superarcTo, // same as hierarchicalTree.superarcs[supernode];
vtkm::Id& transferTarget // same as transferTarget[supernode]
) const
{
// per supernode
// if there is no superarc, it is either the root of the tree or an attachment point
if (vtkm::worklet::contourtree_augmented::NoSuchElement(superarcTo))
{ // null superarc
// next we test for whether it is the global root
if (this->Round == HierarchicalTreeNumRounds)
{ // global root
// no transfer, so no target
transferTarget = vtkm::worklet::contourtree_augmented::NO_SUCH_ELEMENT;
} // global root
else
{ // attachment point
// set the transfer target
transferTarget = hierarchicalTreeSuperparentsPortal.Get(supernodeRegularId);
} // attachment point
} // null superarc
else
{ // actual superarc
// test for the last in the subrange / last on the hyperarc
if ((supernode != this->LastSupernode - 1) &&
(hierarchicalTreeHyperparentsPortal.Get(supernode) ==
hierarchicalTreeHyperparentsPortal.Get(supernode + 1)))
{ // not a superarc we care about
transferTarget = vtkm::worklet::contourtree_augmented::NO_SUCH_ELEMENT;
} // not a superarc we care about
else
{ // a superarc we care about
// strip off the flag bits and set the weight & target
transferTarget = vtkm::worklet::contourtree_augmented::MaskedIndex(superarcTo);
} // a superarc we care about
} // actual superarc
// In serial this worklet implements the following operation
/*
for (vtkm::Id supernode = firstSupernode; supernode < lastSupernode; supernode++)
{ // per supernode
// we need to know the superarc
vtkm::Id superarcTo = hierarchicalTree.superarcs[supernode];
vtkm::Id supernodeRegularId = hierarchicalTree.supernodes[supernode];
// if there is no superarc, it is either the root of the tree or an attachment point
if (noSuchElement(superarcTo))
{ // null superarc
// next we test for whether it is the global root
if (round == hierarchicalTree.nRounds)
{ // global root
// no transfer, so no target
transferTarget[supernode] = NO_SUCH_ELEMENT;
} // global root
else
{ // attachment point
// set the transfer target
transferTarget[supernode] = hierarchicalTree.superparents[supernodeRegularId];
} // attachment point
} // null superarc
else
{ // actual superarc
// test for the last in the subrange / last on the hyperarc
if ((supernode != lastSupernode - 1) && (hierarchicalTree.hyperparents[supernode] == hierarchicalTree.hyperparents[supernode+1]))
{ // not a superarc we care about
transferTarget[supernode] = NO_SUCH_ELEMENT;
} // not a superarc we care about
else
{ // a superarc we care about
// strip off the flag bits
superarcTo = maskedIndex(superarcTo);
// and set the weight & target
transferTarget[supernode] = superarcTo;
} // a superarc we care about
} // actual superarc
} // per supernode
*/
} // operator()()
private:
const vtkm::Id Round;
const vtkm::Id HierarchicalTreeNumRounds;
const vtkm::Id LastSupernode;
}; // ComputeSuperarcTransferWeightsWorklet
} // namespace hierarchical_hyper_sweeper
} // namespace contourtree_distributed
} // namespace worklet
} // namespace vtkm
#endif
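
A sketch of a possible dispatch for one slab [firstSupernode, lastSupernode). For simplicity the transfer targets are written to a separate slab-sized output array rather than into a view of the full transferTarget array; all names are assumptions.

#include <vtkm/cont/ArrayHandleCounting.h>
#include <vtkm/cont/ArrayHandleView.h>
#include <vtkm/cont/Invoker.h>
#include <vtkm/worklet/contourtree_augmented/Types.h>
// ... plus the ComputeSuperarcTransferWeightsWorklet.h header added above

namespace hhs = vtkm::worklet::contourtree_distributed::hierarchical_hyper_sweeper;
using vtkm::worklet::contourtree_augmented::IdArrayType;

void ComputeTransferTargets(vtkm::Id firstSupernode, vtkm::Id lastSupernode,
                            vtkm::Id round, vtkm::Id numRounds,
                            const IdArrayType& treeSupernodes,
                            const IdArrayType& treeSuperparents,
                            const IdArrayType& treeHyperparents,
                            const IdArrayType& treeSuperarcs,
                            IdArrayType& transferTargetSlab)
{
  vtkm::Id numToProcess = lastSupernode - firstSupernode;
  vtkm::cont::ArrayHandleCounting<vtkm::Id> supernodeIndex(firstSupernode, 1, numToProcess);
  auto supernodesView = vtkm::cont::make_ArrayHandleView(treeSupernodes, firstSupernode, numToProcess);
  auto superarcsView = vtkm::cont::make_ArrayHandleView(treeSuperarcs, firstSupernode, numToProcess);
  vtkm::cont::Invoker invoke;
  hhs::ComputeSuperarcTransferWeightsWorklet worklet(round, numRounds, lastSupernode);
  // transferTargetSlab[i] corresponds to transferTarget[firstSupernode + i]
  invoke(worklet, supernodeIndex, supernodesView, treeSuperparents, treeHyperparents,
         superarcsView, transferTargetSlab);
}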

@ -0,0 +1,160 @@
//============================================================================
// Copyright (c) Kitware, Inc.
// All rights reserved.
// See LICENSE.txt for details.
//
// This software is distributed WITHOUT ANY WARRANTY; without even
// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the above copyright notice for more information.
//============================================================================
// Copyright (c) 2018, The Regents of the University of California, through
// Lawrence Berkeley National Laboratory (subject to receipt of any required approvals
// from the U.S. Dept. of Energy). All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// (1) Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// (2) Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// (3) Neither the name of the University of California, Lawrence Berkeley National
// Laboratory, U.S. Dept. of Energy nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
// IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
//
//=============================================================================
//
// This code is an extension of the algorithm presented in the paper:
// Parallel Peak Pruning for Scalable SMP Contour Tree Computation.
// Hamish Carr, Gunther Weber, Christopher Sewell, and James Ahrens.
// Proceedings of the IEEE Symposium on Large Data Analysis and Visualization
// (LDAV), October 2016, Baltimore, Maryland.
//
// The PPP2 algorithm and software were jointly developed by
// Hamish Carr (University of Leeds), Gunther H. Weber (LBNL), and
// Oliver Ruebel (LBNL)
//==============================================================================
#ifndef vtk_m_worklet_contourtree_distributed_hierarchical_hyper_sweeper_initialize_intrinsic_vertex_count_compute_superparent_ids_worklet_h
#define vtk_m_worklet_contourtree_distributed_hierarchical_hyper_sweeper_initialize_intrinsic_vertex_count_compute_superparent_ids_worklet_h
#include <vtkm/worklet/WorkletMapField.h>
#include <vtkm/worklet/contourtree_augmented/Types.h>
namespace vtkm
{
namespace worklet
{
namespace contourtree_distributed
{
namespace hierarchical_hyper_sweeper
{
/// Worklet used in HierarchicalHyperSweeper.InitializeIntrinsicVertexCount(...) to
/// Look up the global Ids in the hierarchical tree & convert to superparent Ids
class InitializeIntrinsicVertexCountComputeSuperparentIdsWorklet
: public vtkm::worklet::WorkletMapField
{
public:
// TODO: We could avoid the need for WholeArrayIn if we did the findRegularByGlobal mapping outside of the worklet first and then used the mapped regular Ids as input
using ControlSignature = void(FieldIn globalIds, // input
ExecObject findRegularByGlobal, // input
WholeArrayIn hierarchicalTreeRegular2Supernode, // input
WholeArrayIn hierarchicalTreeSuperparents, // input
FieldOut superparents // output
);
using ExecutionSignature = void(_1, _2, _3, _4, _5);
using InputDomain = _1;
// Default Constructor
VTKM_EXEC_CONT
InitializeIntrinsicVertexCountComputeSuperparentIdsWorklet() {}
template <typename ExecObjType, typename InFieldPortalType>
VTKM_EXEC void operator()(const vtkm::Id& globalId,
const ExecObjType& findRegularByGlobal,
const InFieldPortalType& hierarchicalTreeRegular2SupernodePortal,
const InFieldPortalType& hierarchicalTreeSuperparentsPortal,
vtkm::Id& superparent) const
{
// per vertex
// retrieve the regular Id (should ALWAYS exist)
vtkm::Id hierarchicalRegularId = findRegularByGlobal(globalId);
// be paranoid
if (vtkm::worklet::contourtree_augmented::NoSuchElement(hierarchicalRegularId))
{
superparent = vtkm::worklet::contourtree_augmented::NO_SUCH_ELEMENT;
}
else
{
// Attachment points cause a minor problem - they are supernodes, but can have a different
// superparent than themselves (or the same one). We therefore test explicitly whether we
// are a supernode, and use either supernodeId or superparent depending on this test
// retrieve the super Id
vtkm::Id superId = hierarchicalTreeRegular2SupernodePortal.Get(hierarchicalRegularId);
// if it doesn't have one, use its superparent
if (vtkm::worklet::contourtree_augmented::NoSuchElement(superId))
{
superparent = hierarchicalTreeSuperparentsPortal.Get(hierarchicalRegularId);
}
else
{
// if it does have a superId, use it
superparent = superId;
}
}
// In serial this worklet implements the following operation
/*
for (vtkm::Id vertex = 0; vertex < globalIds.GetNumberOfValues(); vertex++)
{ // per vertex
// retrieve the regular Id (should ALWAYS exist)
vtkm::Id hierarchicalRegularId = hierarchicalTree.FindRegularByGlobal(globalIds[vertex]);
// be paranoid
if (noSuchElement(hierarchicalRegularId))
superparents[vertex] = NO_SUCH_ELEMENT;
else
{ // found a regular Id
// Attachment points cause a minor problem - they are supernodes, but can have a different
// superparent than themselves (or the same one). We therefore test explicitly whether we
// are a supernode, and use either supernodeId or superparent depending on this test
// retrieve the super Id
vtkm::Id superId = hierarchicalTree.regular2supernode[hierarchicalRegularId];
// if it doesn't have one, use its superparent
if (noSuchElement(superId))
superparents[vertex] = hierarchicalTree.superparents[hierarchicalRegularId];
else
// if it does have a superId, use it
superparents[vertex] = superId;
} // found a regular Id
} // per vertex
*/
} // operator()()
}; // InitializeIntrinsicVertexCountComputeSuperparentIdsWorklet
} // namespace hierarchical_hyper_sweeper
} // namespace contourtree_distributed
} // namespace worklet
} // namespace vtkm
#endif
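
A sketch of a possible dispatch. Here findRegularByGlobal stands for the hierarchical tree's global-to-regular search ExecObject and is left as a template parameter, since its concrete type is not shown in this hunk; all other names are assumptions.

#include <vtkm/cont/Invoker.h>
#include <vtkm/worklet/contourtree_augmented/Types.h>
// ... plus the InitializeIntrinsicVertexCountComputeSuperparentIdsWorklet.h header added above

namespace hhs = vtkm::worklet::contourtree_distributed::hierarchical_hyper_sweeper;
using vtkm::worklet::contourtree_augmented::IdArrayType;

template <typename FindRegularByGlobalExecObject>
void ComputeSuperparents(const IdArrayType& globalIds,
                         const FindRegularByGlobalExecObject& findRegularByGlobal,
                         const IdArrayType& regular2Supernode,
                         const IdArrayType& treeSuperparents,
                         IdArrayType& superparents)
{
  vtkm::cont::Invoker invoke;
  invoke(hhs::InitializeIntrinsicVertexCountComputeSuperparentIdsWorklet{},
         globalIds, findRegularByGlobal, regular2Supernode, treeSuperparents, superparents);
}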

@ -0,0 +1,138 @@
//============================================================================
// Copyright (c) Kitware, Inc.
// All rights reserved.
// See LICENSE.txt for details.
//
// This software is distributed WITHOUT ANY WARRANTY; without even
// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the above copyright notice for more information.
//============================================================================
// Copyright (c) 2018, The Regents of the University of California, through
// Lawrence Berkeley National Laboratory (subject to receipt of any required approvals
// from the U.S. Dept. of Energy). All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// (1) Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// (2) Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// (3) Neither the name of the University of California, Lawrence Berkeley National
// Laboratory, U.S. Dept. of Energy nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
// IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
//
//=============================================================================
//
// This code is an extension of the algorithm presented in the paper:
// Parallel Peak Pruning for Scalable SMP Contour Tree Computation.
// Hamish Carr, Gunther Weber, Christopher Sewell, and James Ahrens.
// Proceedings of the IEEE Symposium on Large Data Analysis and Visualization
// (LDAV), October 2016, Baltimore, Maryland.
//
// The PPP2 algorithm and software were jointly developed by
// Hamish Carr (University of Leeds), Gunther H. Weber (LBNL), and
// Oliver Ruebel (LBNL)
//==============================================================================
#ifndef vtk_m_worklet_contourtree_distributed_hierarchical_hyper_sweeper_initialize_intrinsic_vertex_count_initialize_counts_worklet_h
#define vtk_m_worklet_contourtree_distributed_hierarchical_hyper_sweeper_initialize_intrinsic_vertex_count_initialize_counts_worklet_h
#include <vtkm/worklet/WorkletMapField.h>
#include <vtkm/worklet/contourtree_augmented/Types.h>
namespace vtkm
{
namespace worklet
{
namespace contourtree_distributed
{
namespace hierarchical_hyper_sweeper
{
/// Worklet used in HierarchicalHyperSweeper.InitializeIntrinsicVertexCount(...) to
/// set the count to the Id one off the high end of each range
class InitializeIntrinsicVertexCountInitalizeCountsWorklet : public vtkm::worklet::WorkletMapField
{
public:
using ControlSignature = void(WholeArrayIn superparents, WholeArrayInOut superarcRegularCounts);
using ExecutionSignature = void(InputIndex, _1, _2);
using InputDomain = _1;
// Default Constructor
VTKM_EXEC_CONT
InitializeIntrinsicVertexCountInitalizeCountsWorklet() {}
template <typename InFieldPortalType, typename InOutFieldPortalType>
VTKM_EXEC void operator()(const vtkm::Id& vertex,
const InFieldPortalType superparentsPortal,
const InOutFieldPortalType superarcRegularCountsPortal) const
{
// per vertex
// retrieve the superparent
vtkm::Id superparent = superparentsPortal.Get(vertex);
// if it's NSE, ignore (should never happen, but . . . )
if (vtkm::worklet::contourtree_augmented::NoSuchElement(superparent))
{
return;
}
// if it's the last element, always write
if (vertex == superparentsPortal.GetNumberOfValues() - 1)
{
superarcRegularCountsPortal.Set(superparent, vertex + 1);
}
// otherwise, only write if different from next one
else
{
if (superparentsPortal.Get(vertex + 1) != superparent)
{
superarcRegularCountsPortal.Set(superparent, vertex + 1);
}
}
// In serial this worklet implements the following operation
/*
for (vtkm::Id vertex = 0; vertex < superparents.size(); vertex++)
{ // per vertex
// retrieve the superparent
vtkm::Id superparent = superparents[vertex];
// if it's NSE, ignore (should never happen, but . . . )
if (noSuchElement(superparent))
continue;
// if it's the last element, always write
if (vertex == superparents.size() - 1)
superarcRegularCounts[superparent] = vertex+1;
// otherwise, only write if different from next one
else
if (superparents[vertex+1] != superparent)
superarcRegularCounts[superparent] = vertex+1;
} // per vertex
*/
} // operator()()
}; // InitializeIntrinsicVertexCountInitalizeCountsWorklet
} // namespace hierarchical_hyper_sweeper
} // namespace contourtree_distributed
} // namespace worklet
} // namespace vtkm
#endif
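
A sketch of a possible dispatch. The worklet assumes superparents has already been sorted so that each superarc's vertices form a contiguous run, and that superarcRegularCounts is already sized to the number of superarcs; the names are placeholders.

#include <vtkm/cont/Invoker.h>
#include <vtkm/worklet/contourtree_augmented/Types.h>
// ... plus the InitializeIntrinsicVertexCountInitalizeCountsWorklet.h header added above

namespace hhs = vtkm::worklet::contourtree_distributed::hierarchical_hyper_sweeper;
using vtkm::worklet::contourtree_augmented::IdArrayType;

void InitializeCounts(const IdArrayType& superparents, IdArrayType& superarcRegularCounts)
{
  vtkm::cont::Invoker invoke;
  invoke(hhs::InitializeIntrinsicVertexCountInitalizeCountsWorklet{},
         superparents, superarcRegularCounts);
}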

@ -0,0 +1,141 @@
//============================================================================
// Copyright (c) Kitware, Inc.
// All rights reserved.
// See LICENSE.txt for details.
//
// This software is distributed WITHOUT ANY WARRANTY; without even
// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the above copyright notice for more information.
//============================================================================
// Copyright (c) 2018, The Regents of the University of California, through
// Lawrence Berkeley National Laboratory (subject to receipt of any required approvals
// from the U.S. Dept. of Energy). All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// (1) Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// (2) Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// (3) Neither the name of the University of California, Lawrence Berkeley National
// Laboratory, U.S. Dept. of Energy nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
// IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
//
//=============================================================================
//
// This code is an extension of the algorithm presented in the paper:
// Parallel Peak Pruning for Scalable SMP Contour Tree Computation.
// Hamish Carr, Gunther Weber, Christopher Sewell, and James Ahrens.
// Proceedings of the IEEE Symposium on Large Data Analysis and Visualization
// (LDAV), October 2016, Baltimore, Maryland.
//
// The PPP2 algorithm and software were jointly developed by
// Hamish Carr (University of Leeds), Gunther H. Weber (LBNL), and
// Oliver Ruebel (LBNL)
//==============================================================================
#ifndef vtk_m_worklet_contourtree_distributed_hierarchical_hyper_sweeper_initialize_intrinsic_vertex_count_subtract_low_end_worklet_h
#define vtk_m_worklet_contourtree_distributed_hierarchical_hyper_sweeper_initialize_intrinsic_vertex_count_subtract_low_end_worklet_h
#include <vtkm/worklet/WorkletMapField.h>
#include <vtkm/worklet/contourtree_augmented/Types.h>
namespace vtkm
{
namespace worklet
{
namespace contourtree_distributed
{
namespace hierarchical_hyper_sweeper
{
/// Worklet used in HierarchicalHyperSweeper.InitializeIntrinsicVertexCount(...) to
/// subtract out the low end from the superarc regular counts
class InitializeIntrinsicVertexCountSubtractLowEndWorklet : public vtkm::worklet::WorkletMapField
{
public:
using ControlSignature = void(WholeArrayIn superparents, WholeArrayInOut superarcRegularCounts);
using ExecutionSignature = void(InputIndex, _1, _2);
using InputDomain = _1;
// Default Constructor
VTKM_EXEC_CONT
InitializeIntrinsicVertexCountSubtractLowEndWorklet() {}
template <typename InFieldPortalType, typename InOutFieldPortalType>
VTKM_EXEC void operator()(const vtkm::Id& vertex,
const InFieldPortalType superparentsPortal,
const InOutFieldPortalType superarcRegularCountsPortal) const
{
// per vertex
// retrieve the superparent
vtkm::Id superparent = superparentsPortal.Get(vertex);
// if it's NSE, ignore (should never happen, but . . . )
if (vtkm::worklet::contourtree_augmented::NoSuchElement(superparent))
{
return;
}
// if it's the first element, always write
if (vertex == 0)
{
superarcRegularCountsPortal.Set(superparent,
superarcRegularCountsPortal.Get(superparent) - vertex);
}
// otherwise, only write if different from previous one
else
{
if (superparentsPortal.Get(vertex - 1) != superparent)
{
superarcRegularCountsPortal.Set(superparent,
superarcRegularCountsPortal.Get(superparent) - vertex);
}
}
// In serial this worklet implements the following operation
/*
for (vtkm::Id vertex = 0; vertex < superparents.GetNumberOfValues(); vertex++)
{ // per vertex
// retrieve the superparent
vtkm::Id superparent = superparents[vertex];
// if it's NSE, ignore (should never happen, but . . . )
if (noSuchElement(superparent))
continue;
// if it's the first element, always write
if (vertex == 0)
superarcRegularCounts[superparent] -= vertex;
// otherwise, only write if different from previous one
else
if (superparents[vertex-1] != superparent)
superarcRegularCounts[superparent] -= vertex;
} // per vertex
*/
} // operator()()
}; // InitializeIntrinsicVertexCountSubtractLowEndWorklet
} // namespace hierarchical_hyper_sweeper
} // namespace contourtree_distributed
} // namespace worklet
} // namespace vtkm
#endif
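
Together with the InitalizeCounts pass above, this yields one-past-the-high-end minus the low end of each run, i.e., the number of regular vertices per superarc. A small host-side walk-through of the two passes on a toy array (the data are made up purely for illustration):

#include <vtkm/Types.h>
#include <vector>

int main()
{
  // superparents sorted so each superarc's vertices are contiguous
  std::vector<vtkm::Id> superparents{ 0, 0, 0, 2, 2, 3 };
  std::vector<vtkm::Id> counts(4, 0); // one slot per superarc

  // pass 1 (InitalizeCounts): store one past the high end of each run
  for (std::size_t v = 0; v < superparents.size(); ++v)
    if (v + 1 == superparents.size() || superparents[v + 1] != superparents[v])
      counts[superparents[v]] = static_cast<vtkm::Id>(v + 1);

  // pass 2 (SubtractLowEnd): subtract the low end of each run
  for (std::size_t v = 0; v < superparents.size(); ++v)
    if (v == 0 || superparents[v - 1] != superparents[v])
      counts[superparents[v]] -= static_cast<vtkm::Id>(v);

  // counts is now { 3, 0, 2, 1 }: the regular vertex count of each superarc
  return 0;
}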

@ -0,0 +1,141 @@
//============================================================================
// Copyright (c) Kitware, Inc.
// All rights reserved.
// See LICENSE.txt for details.
//
// This software is distributed WITHOUT ANY WARRANTY; without even
// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the above copyright notice for more information.
//============================================================================
// Copyright (c) 2018, The Regents of the University of California, through
// Lawrence Berkeley National Laboratory (subject to receipt of any required approvals
// from the U.S. Dept. of Energy). All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// (1) Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// (2) Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// (3) Neither the name of the University of California, Lawrence Berkeley National
// Laboratory, U.S. Dept. of Energy nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
// IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
//
//=============================================================================
//
// This code is an extension of the algorithm presented in the paper:
// Parallel Peak Pruning for Scalable SMP Contour Tree Computation.
// Hamish Carr, Gunther Weber, Christopher Sewell, and James Ahrens.
// Proceedings of the IEEE Symposium on Large Data Analysis and Visualization
// (LDAV), October 2016, Baltimore, Maryland.
//
// The PPP2 algorithm and software were jointly developed by
// Hamish Carr (University of Leeds), Gunther H. Weber (LBNL), and
// Oliver Ruebel (LBNL)
//==============================================================================
#ifndef vtk_m_worklet_contourtree_distributed_hierarchical_hyper_sweeper_transfer_target_comperator_h
#define vtk_m_worklet_contourtree_distributed_hierarchical_hyper_sweeper_transfer_target_comperator_h
#include <vtkm/cont/ArrayHandle.h>
#include <vtkm/cont/ExecutionObjectBase.h>
#include <vtkm/worklet/contourtree_augmented/Types.h>
namespace vtkm
{
namespace worklet
{
namespace contourtree_distributed
{
namespace hierarchical_hyper_sweeper
{
// comparator function for an indirect sort on the superarc transfer target
class TransferTargetComperatorImpl
{
public:
using IdArrayPortalType =
typename vtkm::worklet::contourtree_augmented::IdArrayType::ReadPortalType;
// constructor - takes the superarc target portal as a parameter
VTKM_CONT
TransferTargetComperatorImpl(IdArrayPortalType superarcPortal)
: SuperarcPortal(superarcPortal)
{ // constructor
} // constructor
// () operator - gets called to do comparison
VTKM_EXEC
bool operator()(const vtkm::Id& left, const vtkm::Id& right) const
{ // operator()
// NOTE: We need to explicitly check for NO_SUCH_ELEMENT here since vtkm::Id is signed
// while the index type in PPP is unsigned. Thus, for PPP "regular" indices are always
// smaller than NO_SUCH_ELEMENT, while with the signed vtkm::Id, NO_SUCH_ELEMENT is
// negative and the order is not as intended.
// TODO/FIXME: Verify this implementation is correct.
// TODO/FIXME: Is there a better way to do this?
auto leftVal = this->SuperarcPortal.Get(left);
auto rightVal = this->SuperarcPortal.Get(right);
if (vtkm::worklet::contourtree_augmented::NoSuchElement(leftVal))
{
return false;
}
else if (vtkm::worklet::contourtree_augmented::NoSuchElement(rightVal))
{
return true;
}
else
{
return vtkm::worklet::contourtree_augmented::MaskedIndex(leftVal) <
vtkm::worklet::contourtree_augmented::MaskedIndex(rightVal);
}
} // operator()
private:
IdArrayPortalType SuperarcPortal;
}; // TransferTargetComperatorImpl
class TransferTargetComperator : public vtkm::cont::ExecutionObjectBase
{
public:
// constructor - takes the superarc target array as a parameter
VTKM_CONT
TransferTargetComperator(const vtkm::worklet::contourtree_augmented::IdArrayType superarcs)
: Superarcs(superarcs)
{ // constructor
} // constructor
VTKM_CONT TransferTargetComperatorImpl PrepareForExecution(vtkm::cont::DeviceAdapterId device,
vtkm::cont::Token& token) const
{
return TransferTargetComperatorImpl(this->Superarcs.PrepareForInput(device, token));
}
private:
vtkm::worklet::contourtree_augmented::IdArrayType Superarcs;
}; // TransferTargetComperator
} // namespace hierarchical_hyper_sweeper
} // namespace contourtree_distributed
} // namespace worklet
} // namespace vtkm
#endif
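
The ordering this comparator establishes is: ascending by the masked transfer target, with NO_SUCH_ELEMENT entries collected at the end so that each transfer target forms a contiguous segment. The sketch below illustrates the same ordering on the host with std::sort, purely to show the semantics (the on-device path goes through PrepareForExecution as above); the names are illustrative.

#include <vtkm/worklet/contourtree_augmented/Types.h>
#include <algorithm>
#include <vector>

namespace cta = vtkm::worklet::contourtree_augmented;

// Indirect sort: reorder supernode indices by the transfer target they point at.
void SortByTransferTarget(std::vector<vtkm::Id>& supernodeIds,
                          const std::vector<vtkm::Id>& transferTarget)
{
  std::sort(supernodeIds.begin(), supernodeIds.end(),
            [&transferTarget](vtkm::Id left, vtkm::Id right) {
              vtkm::Id leftVal = transferTarget[left];
              vtkm::Id rightVal = transferTarget[right];
              if (cta::NoSuchElement(leftVal))
                return false; // NO_SUCH_ELEMENT never sorts before anything
              if (cta::NoSuchElement(rightVal))
                return true; // real targets sort before NO_SUCH_ELEMENT
              return cta::MaskedIndex(leftVal) < cta::MaskedIndex(rightVal);
            });
}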

@ -0,0 +1,123 @@
//============================================================================
// Copyright (c) Kitware, Inc.
// All rights reserved.
// See LICENSE.txt for details.
//
// This software is distributed WITHOUT ANY WARRANTY; without even
// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the above copyright notice for more information.
//============================================================================
// Copyright (c) 2018, The Regents of the University of California, through
// Lawrence Berkeley National Laboratory (subject to receipt of any required approvals
// from the U.S. Dept. of Energy). All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// (1) Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// (2) Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// (3) Neither the name of the University of California, Lawrence Berkeley National
// Laboratory, U.S. Dept. of Energy nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
// IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
//
//=============================================================================
//
// This code is an extension of the algorithm presented in the paper:
// Parallel Peak Pruning for Scalable SMP Contour Tree Computation.
// Hamish Carr, Gunther Weber, Christopher Sewell, and James Ahrens.
// Proceedings of the IEEE Symposium on Large Data Analysis and Visualization
// (LDAV), October 2016, Baltimore, Maryland.
//
// The PPP2 algorithm and software were jointly developed by
// Hamish Carr (University of Leeds), Gunther H. Weber (LBNL), and
// Oliver Ruebel (LBNL)
//==============================================================================
#ifndef vtk_m_worklet_contourtree_distributed_hierarchical_hyper_sweeper_transfer_weights_update_lhe_worklet_h
#define vtk_m_worklet_contourtree_distributed_hierarchical_hyper_sweeper_transfer_weights_update_lhe_worklet_h
#include <vtkm/worklet/WorkletMapField.h>
#include <vtkm/worklet/contourtree_augmented/Types.h>
namespace vtkm
{
namespace worklet
{
namespace contourtree_distributed
{
namespace hierarchical_hyper_sweeper
{
/// Worklet used in HierarchicalHyperSweeper.TransferWeights(...) to implement
/// step 7b. Now find the LHE of each group and subtract out the prior weight
class TransferWeightsUpdateLHEWorklet : public vtkm::worklet::WorkletMapField
{
public:
using ControlSignature = void(FieldIn sortedTransferTargetPortal,
FieldIn sortedTransferTargetShiftedView,
FieldIn valuePrefixSumShiftedView,
WholeArrayInOut dependentValuesPortal);
using ExecutionSignature = void(_1, _2, _3, _4);
template <typename InOutPortalType>
VTKM_EXEC void operator()(const vtkm::Id& sortedTransferTargetValue,
const vtkm::Id& sortedTransferTargetPreviousValue,
const vtkm::Id& valuePrefixSumPreviousValue,
InOutPortalType& dependentValuesPortal) const
{
// per supernode
// ignore any that point at NO_SUCH_ELEMENT
if (vtkm::worklet::contourtree_augmented::NoSuchElement(sortedTransferTargetValue))
{
return;
}
if (sortedTransferTargetValue != sortedTransferTargetPreviousValue)
{
auto originalValue = dependentValuesPortal.Get(sortedTransferTargetValue);
dependentValuesPortal.Set(sortedTransferTargetValue,
originalValue - valuePrefixSumPreviousValue);
}
// In serial this worklet implements the following operation
/*
for (vtkm::Id supernode = firstSupernode + 1; supernode < lastSupernode; supernode++)
{ // per supernode
// ignore any that point at NO_SUCH_ELEMENT
if (noSuchElement(sortedTransferTarget[supernode]))
continue;
// the LHE at 0 is special - it subtracts zero. In practice, since NO_SUCH_ELEMENT will sort low, this will never
// occur, but let's keep the logic strict
if (sortedTransferTarget[supernode] != sortedTransferTarget[supernode-1])
{ // LHE not 0
dependentValues[sortedTransferTarget[supernode]] -= valuePrefixSum[supernode-1];
} // LHE not 0
} // per supernode
*/
} // operator()()
}; // TransferWeightsUpdateLHEWorklet
} // namespace hierarchical_hyper_sweeper
} // namespace contourtree_distributed
} // namespace worklet
} // namespace vtkm
#endif
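
A sketch of a possible dispatch for one slab [firstSupernode, lastSupernode). Each entry is paired with its predecessor via views shifted by one, matching the serial loop in the comment above; all names are assumptions.

#include <vtkm/cont/ArrayHandleView.h>
#include <vtkm/cont/Invoker.h>
#include <vtkm/worklet/contourtree_augmented/Types.h>
// ... plus the TransferWeightsUpdateLHEWorklet.h header added above

namespace hhs = vtkm::worklet::contourtree_distributed::hierarchical_hyper_sweeper;
using vtkm::worklet::contourtree_augmented::IdArrayType;

void TransferWeightsLHE(vtkm::Id firstSupernode, vtkm::Id lastSupernode,
                        const IdArrayType& sortedTransferTarget,
                        const IdArrayType& valuePrefixSum,
                        IdArrayType& dependentValues)
{
  // process entries firstSupernode+1 .. lastSupernode-1, each paired with its predecessor
  vtkm::Id numToProcess = lastSupernode - firstSupernode - 1;
  auto targetView = vtkm::cont::make_ArrayHandleView(sortedTransferTarget, firstSupernode + 1, numToProcess);
  auto targetShiftedView = vtkm::cont::make_ArrayHandleView(sortedTransferTarget, firstSupernode, numToProcess);
  auto prefixSumShiftedView = vtkm::cont::make_ArrayHandleView(valuePrefixSum, firstSupernode, numToProcess);
  vtkm::cont::Invoker invoke;
  invoke(hhs::TransferWeightsUpdateLHEWorklet{}, targetView, targetShiftedView,
         prefixSumShiftedView, dependentValues);
}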

@ -0,0 +1,135 @@
//============================================================================
// Copyright (c) Kitware, Inc.
// All rights reserved.
// See LICENSE.txt for details.
//
// This software is distributed WITHOUT ANY WARRANTY; without even
// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the above copyright notice for more information.
//============================================================================
// Copyright (c) 2018, The Regents of the University of California, through
// Lawrence Berkeley National Laboratory (subject to receipt of any required approvals
// from the U.S. Dept. of Energy). All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// (1) Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// (2) Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// (3) Neither the name of the University of California, Lawrence Berkeley National
// Laboratory, U.S. Dept. of Energy nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
// IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
//
//=============================================================================
//
// This code is an extension of the algorithm presented in the paper:
// Parallel Peak Pruning for Scalable SMP Contour Tree Computation.
// Hamish Carr, Gunther Weber, Christopher Sewell, and James Ahrens.
// Proceedings of the IEEE Symposium on Large Data Analysis and Visualization
// (LDAV), October 2016, Baltimore, Maryland.
//
// The PPP2 algorithm and software were jointly developed by
// Hamish Carr (University of Leeds), Gunther H. Weber (LBNL), and
// Oliver Ruebel (LBNL)
//==============================================================================
#ifndef vtk_m_worklet_contourtree_distributed_hierarchical_hyper_sweeper_transfer_weights_update_rhe_worklet_h
#define vtk_m_worklet_contourtree_distributed_hierarchical_hyper_sweeper_transfer_weights_update_rhe_worklet_h
#include <vtkm/worklet/WorkletMapField.h>
#include <vtkm/worklet/contourtree_augmented/Types.h>
namespace vtkm
{
namespace worklet
{
namespace contourtree_distributed
{
namespace hierarchical_hyper_sweeper
{
/// Worklet used in HierarchicalHyperSweeper.TransferWeights(...) to implement
/// step 7a. Find the RHE of each group and transfer the prefix sum weight.
/// Note that we do not compute the transfer weight separately; we add it in place instead
class TransferWeightsUpdateRHEWorklet : public vtkm::worklet::WorkletMapField
{
public:
using ControlSignature =
void(FieldIn supernodeIndex, // input counting array [firstSupernode, lastSupernode)
WholeArrayIn sortedTransferTarget,
FieldIn valuePrefixSumView, // input view of valuePrefixSum[firstSupernode, lastSupernode)
WholeArrayInOut dependentValuesPortal);
using ExecutionSignature = void(_1, _2, _3, _4);
// Default Constructor
VTKM_EXEC_CONT
TransferWeightsUpdateRHEWorklet(const vtkm::Id& lastSupernode)
: LastSupernode(lastSupernode)
{
}
template <typename InPortalType, typename OutPortalType>
VTKM_EXEC void operator()(const vtkm::Id& supernode,
const InPortalType& sortedTransferTargetPortal,
const vtkm::Id& valuePrefixSum, // same as valuePrefixSum[supernode]
OutPortalType& dependentValuesPortal) const
{
// per supernode
// ignore any that point at NO_SUCH_ELEMENT
vtkm::Id transferTarget = sortedTransferTargetPortal.Get(supernode);
if (!vtkm::worklet::contourtree_augmented::NoSuchElement(transferTarget))
{
// the RHE of each segment transfers its weight (including all irrelevant prefixes)
if ((supernode == this->LastSupernode - 1) ||
(transferTarget != sortedTransferTargetPortal.Get(supernode + 1)))
{ // RHE of segment
auto originalValue = dependentValuesPortal.Get(transferTarget);
dependentValuesPortal.Set(transferTarget, originalValue + valuePrefixSum);
} // RHE of segment
}
// In serial this worklet implements the following operation
/*
for (vtkm::Id supernode = firstSupernode; supernode < lastSupernode; supernode++)
{ // per supernode
// ignore any that point at NO_SUCH_ELEMENT
if (noSuchElement(sortedTransferTarget[supernode]))
continue;
// the RHE of each segment transfers its weight (including all irrelevant prefixes)
if ((supernode == lastSupernode - 1) || (sortedTransferTarget[supernode] != sortedTransferTarget[supernode+1]))
{ // RHE of segment
dependentValues[sortedTransferTarget[supernode]] += valuePrefixSum[supernode];
} // RHE of segment
} // per supernode
*/
} // operator()()
private:
const vtkm::Id LastSupernode;
}; // TransferWeightsUpdateRHEWorklet
} // namespace hierarchical_hyper_sweeper
} // namespace contourtree_distributed
} // namespace worklet
} // namespace vtkm
#endif
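
A sketch of a possible dispatch for one slab [firstSupernode, lastSupernode), mirroring the LHE variant above; all names are assumptions.

#include <vtkm/cont/ArrayHandleCounting.h>
#include <vtkm/cont/ArrayHandleView.h>
#include <vtkm/cont/Invoker.h>
#include <vtkm/worklet/contourtree_augmented/Types.h>
// ... plus the TransferWeightsUpdateRHEWorklet.h header added above

namespace hhs = vtkm::worklet::contourtree_distributed::hierarchical_hyper_sweeper;
using vtkm::worklet::contourtree_augmented::IdArrayType;

void TransferWeightsRHE(vtkm::Id firstSupernode, vtkm::Id lastSupernode,
                        const IdArrayType& sortedTransferTarget,
                        const IdArrayType& valuePrefixSum,
                        IdArrayType& dependentValues)
{
  vtkm::Id numToProcess = lastSupernode - firstSupernode;
  vtkm::cont::ArrayHandleCounting<vtkm::Id> supernodeIndex(firstSupernode, 1, numToProcess);
  auto prefixSumView = vtkm::cont::make_ArrayHandleView(valuePrefixSum, firstSupernode, numToProcess);
  vtkm::cont::Invoker invoke;
  hhs::TransferWeightsUpdateRHEWorklet worklet(lastSupernode);
  invoke(worklet, supernodeIndex, sortedTransferTarget, prefixSumView, dependentValues);
}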

@ -32,6 +32,7 @@ set(headers
CopyNewNodesSetSuperparentsWorklet.h
CopyFirstSupernodePerIterationWorklet.h
CopyFirstHypernodePerIterationWorklet.h
CalculateAttachementCounterWorklet.h
)
vtkm_declare_headers(${headers})

@ -0,0 +1,105 @@
//============================================================================
// Copyright (c) Kitware, Inc.
// All rights reserved.
// See LICENSE.txt for details.
//
// This software is distributed WITHOUT ANY WARRANTY; without even
// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the above copyright notice for more information.
//============================================================================
// Copyright (c) 2018, The Regents of the University of California, through
// Lawrence Berkeley National Laboratory (subject to receipt of any required approvals
// from the U.S. Dept. of Energy). All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// (1) Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// (2) Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// (3) Neither the name of the University of California, Lawrence Berkeley National
// Laboratory, U.S. Dept. of Energy nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
// IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
//
//=============================================================================
//
// This code is an extension of the algorithm presented in the paper:
// Parallel Peak Pruning for Scalable SMP Contour Tree Computation.
// Hamish Carr, Gunther Weber, Christopher Sewell, and James Ahrens.
// Proceedings of the IEEE Symposium on Large Data Analysis and Visualization
// (LDAV), October 2016, Baltimore, Maryland.
//
// The PPP2 algorithm and software were jointly developed by
// Hamish Carr (University of Leeds), Gunther H. Weber (LBNL), and
// Oliver Ruebel (LBNL)
//==============================================================================
#ifndef vtk_m_worklet_contourtree_distributed_tree_grafter_calculate_attachement_counter_worklet_h
#define vtk_m_worklet_contourtree_distributed_tree_grafter_calculate_attachement_counter_worklet_h
#include <vtkm/worklet/WorkletMapField.h>
#include <vtkm/worklet/contourtree_augmented/Types.h>
namespace vtkm
{
namespace worklet
{
namespace contourtree_distributed
{
namespace tree_grafter
{
// Worklet used in TreeGrafter.GraftInteriorForests to compute the attachmentCounter array,
// which is used to check whether additional attachment points need to be transferred
class CalculateAttachementCounterWorklet : public vtkm::worklet::WorkletMapField
{
public:
using ControlSignature = void(FieldIn supernodeType, // input
FieldIn hierarchicalSuperId, // input
FieldOut attachmentCounter // output
);
using ExecutionSignature = _3(_1, _2);
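// The return value of operator() is written to the third control argument (attachmentCounter).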
using InputDomain = _1;
// Default Constructor
VTKM_EXEC_CONT
CalculateAttachementCounterWorklet() {}
VTKM_EXEC vtkm::Id operator()(const vtkm::Id& supernodeType,
const vtkm::Id& hierarchicalSuperId) const
{ // operator ()
return (supernodeType == vtkm::worklet::contourtree_augmented::IS_ATTACHMENT) &&
vtkm::worklet::contourtree_augmented::NoSuchElement(hierarchicalSuperId);
// In serial this worklet implements the following operation
/*
for (indexType supernode = 0; supernode < contourTree->supernodes.size(); supernode++)
attachmentCounter[supernode] = (supernodeType[supernode] == IS_ATTACHMENT) && noSuchElement(hierarchicalSuperID[supernode]);
*/
} // operator ()
}; // CalculateAttachementCounterWorklet
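// A minimal usage sketch (illustration only, not part of this worklet), assuming supernodeType
// and hierarchicalSuperId are the per-supernode ArrayHandle<vtkm::Id> maintained by the
// TreeGrafter and that <vtkm/cont/Invoker.h> and <vtkm/cont/Algorithm.h> are included:
/*
vtkm::cont::Invoker invoke;
vtkm::cont::ArrayHandle<vtkm::Id> attachmentCounter;
invoke(CalculateAttachementCounterWorklet{}, supernodeType, hierarchicalSuperId, attachmentCounter);
// The number of attachment points still to be transferred is the sum of the 0/1 counters.
vtkm::Id numAttachmentPoints = vtkm::cont::Algorithm::Reduce(attachmentCounter, vtkm::Id{ 0 });
*/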
} // namespace tree_grafter
} // namespace contourtree_distributed
} // namespace worklet
} // namespace vtkm
#endif

@ -10,6 +10,7 @@
set(headers
GenerateTestDataSets.h
TestingContourTreeUniformDistributedLoadArrays.h
)
set(unit_tests

@ -0,0 +1,120 @@
//============================================================================
// Copyright (c) Kitware, Inc.
// All rights reserved.
// See LICENSE.txt for details.
//
// This software is distributed WITHOUT ANY WARRANTY; without even
// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the above copyright notice for more information.
//============================================================================
// Copyright (c) 2018, The Regents of the University of California, through
// Lawrence Berkeley National Laboratory (subject to receipt of any required approvals
// from the U.S. Dept. of Energy). All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// (1) Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// (2) Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// (3) Neither the name of the University of California, Lawrence Berkeley National
// Laboratory, U.S. Dept. of Energy nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
// IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
//
#ifndef vtk_m_worklet_testing_contourtree_distributed_load_arrays_h
#define vtk_m_worklet_testing_contourtree_distributed_load_arrays_h
#include <vtkm/Types.h>
#include <vtkm/cont/ArrayHandle.h>
namespace vtkm
{
namespace worklet
{
namespace testing
{
namespace contourtree_distributed
{
// Types used in binary test files
typedef size_t FileSizeType;
typedef unsigned long long FileIndexType;
const FileIndexType FileIndexMask = 0x07FFFFFFFFFFFFFFLL;
typedef double FileDataType;
inline void ReadIndexArray(std::ifstream& is, vtkm::cont::ArrayHandle<vtkm::Id>& indexArray)
{
FileSizeType sz;
is.read(reinterpret_cast<char*>(&sz), sizeof(sz));
//std::cout << "Reading index array of size " << sz << std::endl;
indexArray.Allocate(sz);
auto writePortal = indexArray.WritePortal();
for (vtkm::Id i = 0; i < static_cast<vtkm::Id>(sz); ++i)
{
FileIndexType x;
is.read(reinterpret_cast<char*>(&x), sizeof(x));
// Convert from the index type used in the file (64 bit) to the index type currently in use
// by shifting the flag portion of the index accordingly
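// Worked example (assuming a 32-bit vtkm::Id): the shift amount is
// (sizeof(FileIndexType) - sizeof(vtkm::Id)) << 3 = (8 - 4) * 8 = 32 bits, so flag bits stored
// above FileIndexMask in the 64-bit file index move into the top bits of the 32-bit vtkm::Id,
// while the index portion (x & FileIndexMask) is kept unchanged. With a 64-bit vtkm::Id the
// shift is 0 and the value passes through as-is.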
vtkm::Id shiftedFlagVal = (x & FileIndexMask) |
((x & ~FileIndexMask) >> ((sizeof(FileIndexType) - sizeof(vtkm::Id)) << 3));
writePortal.Set(i, shiftedFlagVal);
}
}
inline void ReadIndexArrayVector(std::ifstream& is,
std::vector<vtkm::cont::ArrayHandle<vtkm::Id>>& indexArrayVector)
{
FileSizeType sz;
is.read(reinterpret_cast<char*>(&sz), sizeof(sz));
//std::cout << "Reading vector of " << sz << " index arrays" << std::endl;
indexArrayVector.resize(sz);
for (vtkm::Id i = 0; i < static_cast<vtkm::Id>(sz); ++i)
{
ReadIndexArray(is, indexArrayVector[i]);
}
}
template <class FieldType>
inline void ReadDataArray(std::ifstream& is, vtkm::cont::ArrayHandle<FieldType>& dataArray)
{
FileSizeType sz;
is.read(reinterpret_cast<char*>(&sz), sizeof(sz));
//std::cout << "Reading data array of size " << sz << std::endl;
dataArray.Allocate(sz);
auto writePortal = dataArray.WritePortal();
for (vtkm::Id i = 0; i < static_cast<vtkm::Id>(sz); ++i)
{
FileDataType x;
is.read(reinterpret_cast<char*>(&x), sizeof(x));
//std::cout << "Read " << x << std::endl;
writePortal.Set(
i,
FieldType(x)); // Test data is stored as double but can generally also be safely cast to float.
}
}
} // namespace contourtree_distributed
} // namespace testing
} // namespace worklet
} // namespace vtkm
#endif

@ -50,19 +50,75 @@
// Oliver Ruebel (LBNL)
//==============================================================================
// #define DEBUG_PRINT
// #define PRINT_RESULT
#include <vtkm/cont/testing/MakeTestDataSet.h>
#include <vtkm/cont/testing/Testing.h>
#include <vtkm/worklet/contourtree_augmented/DataSetMesh.h>
#include <vtkm/worklet/contourtree_augmented/PrintVectors.h>
#include <vtkm/worklet/contourtree_augmented/meshtypes/ContourTreeMesh.h>
#include <vtkm/worklet/contourtree_distributed/CombineHyperSweepBlockFunctor.h>
#include <vtkm/worklet/contourtree_distributed/HierarchicalContourTree.h>
#include <vtkm/worklet/contourtree_distributed/HierarchicalHyperSweeper.h>
#include <vtkm/worklet/contourtree_distributed/HyperSweepBlock.h>
#include <vtkm/worklet/contourtree_distributed/SpatialDecomposition.h>
#include <vtkm/worklet/testing/TestingContourTreeUniformDistributedLoadArrays.h>
// clang-format off
VTKM_THIRDPARTY_PRE_INCLUDE
#include <vtkm/thirdparty/diy/diy.h>
VTKM_THIRDPARTY_POST_INCLUDE
// clang-format on
namespace
{
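// Helper to read a HierarchicalContourTree from one of the binary test files used below.
// The arrays are read in the fixed order in which they were written; the number of rounds is
// stored as a plain int between the hyper structure arrays and the per-round counts.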
template <typename FieldType>
void LoadHierarchicalContourTree(
const char* filename,
vtkm::worklet::contourtree_distributed::HierarchicalContourTree<FieldType>& ht)
{
using vtkm::worklet::testing::contourtree_distributed::ReadIndexArray;
using vtkm::worklet::testing::contourtree_distributed::ReadIndexArrayVector;
using vtkm::worklet::testing::contourtree_distributed::ReadDataArray;
std::ifstream is(filename, std::ios_base::binary);
ReadIndexArray(is, ht.RegularNodeGlobalIds);
ReadDataArray<FieldType>(is, ht.DataValues);
ReadIndexArray(is, ht.RegularNodeSortOrder);
ReadIndexArray(is, ht.Regular2Supernode);
ReadIndexArray(is, ht.Superparents);
ReadIndexArray(is, ht.Supernodes);
ReadIndexArray(is, ht.Superarcs);
ReadIndexArray(is, ht.Hyperparents);
ReadIndexArray(is, ht.Super2Hypernode);
ReadIndexArray(is, ht.WhichRound);
ReadIndexArray(is, ht.WhichIteration);
ReadIndexArray(is, ht.Hypernodes);
ReadIndexArray(is, ht.Hyperarcs);
ReadIndexArray(is, ht.Superchildren);
int nRounds;
is.read(reinterpret_cast<char*>(&nRounds), sizeof(nRounds));
//std::cout << "nRounds = " << nRounds << std::endl;
ht.NumRounds = nRounds;
//ht.NumOwnedRegularVertices = 0;
ReadIndexArray(is, ht.NumRegularNodesInRound);
ReadIndexArray(is, ht.NumSupernodesInRound);
ReadIndexArray(is, ht.NumHypernodesInRound);
ReadIndexArray(is, ht.NumIterations);
ReadIndexArrayVector(is, ht.FirstSupernodePerIteration);
ReadIndexArrayVector(is, ht.FirstHypernodePerIteration);
}
template <typename FieldType>
void TestContourTreeMeshCombine(const std::string& mesh1_filename,
const std::string& mesh2_filename,
const std::string& combined_filename)
{
std::cout << "Testing combining meshes " << mesh1_filename << " " << mesh2_filename
<< " with expected result " << combined_filename << std::endl;
vtkm::worklet::contourtree_augmented::ContourTreeMesh<FieldType> contourTreeMesh1;
contourTreeMesh1.Load(mesh1_filename.c_str());
vtkm::worklet::contourtree_augmented::ContourTreeMesh<FieldType> contourTreeMesh2;
@ -85,6 +141,253 @@ void TestContourTreeMeshCombine(const std::string& mesh1_filename,
VTKM_TEST_ASSERT(contourTreeMesh2.MaxNeighbors == combinedContourTreeMesh.MaxNeighbors);
}
void TestHierarchicalHyperSweeper()
{
std::cout << "Testing HierarchicalHyperSweeper" << std::endl;
using vtkm::cont::testing::Testing;
using ContourTreeDataFieldType = vtkm::FloatDefault;
// Test input
const int numBlocks = 4;
const char* filenames[numBlocks] = { "misc/8x9test_HierarchicalAugmentedTree_Block0.dat",
"misc/8x9test_HierarchicalAugmentedTree_Block1.dat",
"misc/8x9test_HierarchicalAugmentedTree_Block2.dat",
"misc/8x9test_HierarchicalAugmentedTree_Block3.dat" };
vtkm::Id3 globalSize{ 9, 8, 1 };
vtkm::Id3 blocksPerDim{ 2, 2, 1 };
vtkm::Id3 sizes[numBlocks] = { { 5, 4, 1 }, { 5, 5, 1 }, { 5, 4, 1 }, { 5, 5, 1 } };
vtkm::Id3 origins[numBlocks] = { { 0, 0, 0 }, { 0, 3, 0 }, { 4, 0, 0 }, { 4, 3, 0 } };
vtkm::Id3 blockIndices[numBlocks] = { { 0, 0, 0 }, { 0, 1, 0 }, { 1, 0, 0 }, { 1, 1, 0 } };
// Expected output
vtkm::cont::ArrayHandle<vtkm::Id> expectedIntrinsicVolume[numBlocks] = {
vtkm::cont::make_ArrayHandle<vtkm::Id>({ 6, 9, 8, 24, 20, 1, 1 }),
vtkm::cont::make_ArrayHandle<vtkm::Id>({ 6, 9, 8, 24, 20, 1, 1 }),
vtkm::cont::make_ArrayHandle<vtkm::Id>({ 6, 9, 8, 24, 20, 1 }),
vtkm::cont::make_ArrayHandle<vtkm::Id>({ 6, 9, 8, 24, 20, 1, 2 })
};
vtkm::cont::ArrayHandle<vtkm::Id> expectedDependentVolume[numBlocks] = {
vtkm::cont::make_ArrayHandle<vtkm::Id>({ 6, 9, 18, 24, 46, 72, 1 }),
vtkm::cont::make_ArrayHandle<vtkm::Id>({ 6, 9, 18, 24, 46, 72, 1 }),
vtkm::cont::make_ArrayHandle<vtkm::Id>({ 6, 9, 18, 24, 46, 72 }),
vtkm::cont::make_ArrayHandle<vtkm::Id>({ 6, 9, 18, 24, 46, 72, 2 })
};
// Create spatial decomposition
vtkm::worklet::contourtree_distributed::SpatialDecomposition spatialDecomp(
blocksPerDim,
globalSize,
vtkm::cont::make_ArrayHandle(blockIndices, numBlocks, vtkm::CopyFlag::Off),
vtkm::cont::make_ArrayHandle(origins, numBlocks, vtkm::CopyFlag::Off),
vtkm::cont::make_ArrayHandle(sizes, numBlocks, vtkm::CopyFlag::Off));
// Load trees
vtkm::worklet::contourtree_distributed::HierarchicalContourTree<vtkm::FloatDefault>
hct[numBlocks];
for (vtkm::Id blockNo = 0; blockNo < numBlocks; ++blockNo)
{
LoadHierarchicalContourTree(Testing::DataPath(filenames[blockNo]).c_str(), hct[blockNo]);
#ifdef DEBUG_PRINT
std::cout << hct[blockNo].DebugPrint("AfterLoad", __FILE__, __LINE__);
#endif
}
// Create and add DIY blocks
auto comm = vtkm::cont::EnvironmentTracker::GetCommunicator();
vtkm::Id rank = comm.rank();
vtkmdiy::Master master(comm,
1, // Use 1 thread, VTK-m will do the threading
-1 // All blocks in memory
);
// Set up connectivity
using RegularDecomposer = vtkmdiy::RegularDecomposer<vtkmdiy::DiscreteBounds>;
RegularDecomposer::BoolVector shareFace(3, true);
RegularDecomposer::BoolVector wrap(3, false);
RegularDecomposer::CoordinateVector ghosts(3, 1);
RegularDecomposer::DivisionsVector diyDivisions{ 2, 2, 1 }; // HARDCODED FOR TEST
int numDims = static_cast<int>(globalSize[2] > 1 ? 3 : 2);
RegularDecomposer decomposer(numDims,
spatialDecomp.GetVTKmDIYBounds(),
static_cast<int>(spatialDecomp.GetGlobalNumberOfBlocks()),
shareFace,
wrap,
ghosts,
diyDivisions);
// ... coordinates of local blocks
auto localBlockIndicesPortal = spatialDecomp.LocalBlockIndices.ReadPortal();
std::vector<int> vtkmdiyLocalBlockGids(numBlocks);
for (vtkm::Id bi = 0; bi < numBlocks; bi++)
{
RegularDecomposer::DivisionsVector diyCoords(static_cast<size_t>(numDims));
auto currentCoords = localBlockIndicesPortal.Get(bi);
for (vtkm::IdComponent d = 0; d < numDims; ++d)
{
diyCoords[d] = static_cast<int>(currentCoords[d]);
}
vtkmdiyLocalBlockGids[static_cast<size_t>(bi)] =
RegularDecomposer::coords_to_gid(diyCoords, diyDivisions);
}
// Define which blocks live on which rank so that vtkmdiy can manage them
vtkmdiy::DynamicAssigner assigner(
comm, comm.size(), static_cast<int>(spatialDecomp.GetGlobalNumberOfBlocks()));
for (vtkm::Id bi = 0; bi < numBlocks; bi++)
{
assigner.set_rank(static_cast<int>(rank),
static_cast<int>(vtkmdiyLocalBlockGids[static_cast<size_t>(bi)]));
}
vtkmdiy::fix_links(master, assigner);
vtkm::worklet::contourtree_distributed::HyperSweepBlock<ContourTreeDataFieldType>*
localHyperSweeperBlocks[numBlocks];
for (vtkm::Id blockNo = 0; blockNo < numBlocks; ++blockNo)
{
localHyperSweeperBlocks[blockNo] =
new vtkm::worklet::contourtree_distributed::HyperSweepBlock<ContourTreeDataFieldType>(
blockNo,
vtkmdiyLocalBlockGids[blockNo],
origins[blockNo],
sizes[blockNo],
globalSize,
hct[blockNo]);
master.add(
vtkmdiyLocalBlockGids[blockNo], localHyperSweeperBlocks[blockNo], new vtkmdiy::Link());
}
master.foreach (
[](vtkm::worklet::contourtree_distributed::HyperSweepBlock<ContourTreeDataFieldType>* b,
const vtkmdiy::Master::ProxyWithLink&) {
#ifdef DEBUG_PRINT
std::cout << "Block " << b->GlobalBlockId << std::endl;
std::cout << b->HierarchicalContourTree.DebugPrint(
"Before initializing HyperSweeper", __FILE__, __LINE__);
#endif
// Create HyperSweeper
vtkm::worklet::contourtree_distributed::HierarchicalHyperSweeper<vtkm::Id,
ContourTreeDataFieldType>
hyperSweeper(
b->GlobalBlockId, b->HierarchicalContourTree, b->IntrinsicVolume, b->DependentVolume);
#ifdef DEBUG_PRINT
std::cout << "Block " << b->GlobalBlockId << std::endl;
std::cout << b->HierarchicalContourTree.DebugPrint(
"After initializing HyperSweeper", __FILE__, __LINE__);
#endif
// Create mesh and initialize vertex counts
vtkm::worklet::contourtree_augmented::mesh_dem::IdRelabeler idRelabeler{ b->Origin,
b->Size,
b->GlobalSize };
if (b->GlobalSize[2] <= 1)
{
vtkm::worklet::contourtree_augmented::DataSetMeshTriangulation2DFreudenthal mesh(
vtkm::Id2{ b->Size[0], b->Size[1] });
hyperSweeper.InitializeIntrinsicVertexCount(
b->HierarchicalContourTree, mesh, idRelabeler, b->IntrinsicVolume);
}
else
{
// TODO/FIXME: For getting owned vertices, it should not make a difference whether marching
// cubes connectivity is used or not. Verify.
vtkm::worklet::contourtree_augmented::DataSetMeshTriangulation3DFreudenthal mesh(b->Size);
hyperSweeper.InitializeIntrinsicVertexCount(
b->HierarchicalContourTree, mesh, idRelabeler, b->IntrinsicVolume);
}
#ifdef DEBUG_PRINT
std::cout << "Block " << b->GlobalBlockId << std::endl;
std::cout << b->HierarchicalContourTree.DebugPrint(
"After initializing intrinsic vertex count", __FILE__, __LINE__);
#endif
// Initialize dependentVolume by copy from intrinsicVolume
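// (Each superarc's dependent count starts out as just its intrinsic vertex count; the local
// hypersweep below then accumulates the counts transferred from the subtrees that depend on it.)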
vtkm::cont::Algorithm::Copy(b->IntrinsicVolume, b->DependentVolume);
// Perform the local hypersweep
hyperSweeper.LocalHyperSweep();
#ifdef DEBUG_PRINT
std::cout << "Block " << b->GlobalBlockId << std::endl;
std::cout << b->HierarchicalContourTree.DebugPrint(
"After local hypersweep", __FILE__, __LINE__);
#endif
});
// Reduce
// partners for merge over regular block grid
vtkmdiy::RegularSwapPartners partners(
decomposer, // domain decomposition
2, // radix of k-ary reduction.
true // contiguous: true=distance doubling, false=distance halving
);
vtkmdiy::reduce(master,
assigner,
partners,
vtkm::worklet::contourtree_distributed::CobmineHyperSweepBlockFunctor<
ContourTreeDataFieldType>{});
#ifdef PRINT_RESULT
// Print
vtkm::Id totalVolume = globalSize[0] * globalSize[1] * globalSize[2];
master.foreach (
[&totalVolume](
vtkm::worklet::contourtree_distributed::HyperSweepBlock<ContourTreeDataFieldType>* b,
const vtkmdiy::Master::ProxyWithLink&) {
std::cout << "Block " << b->GlobalBlockId << std::endl;
std::cout << "=========" << std::endl;
vtkm::worklet::contourtree_augmented::PrintHeader(b->IntrinsicVolume.GetNumberOfValues(),
std::cout);
vtkm::worklet::contourtree_augmented::PrintIndices(
"Intrinsic Volume", b->IntrinsicVolume, -1, std::cout);
vtkm::worklet::contourtree_augmented::PrintIndices(
"Dependent Volume", b->DependentVolume, -1, std::cout);
std::cout << b->HierarchicalContourTree.DebugPrint(
"Called from DumpVolumes", __FILE__, __LINE__);
std::cout << vtkm::worklet::contourtree_distributed::HierarchicalContourTree<
ContourTreeDataFieldType>::DumpVolumes(b->HierarchicalContourTree.Supernodes,
b->HierarchicalContourTree.Superarcs,
b->HierarchicalContourTree.RegularNodeGlobalIds,
totalVolume,
b->IntrinsicVolume,
b->DependentVolume);
});
#endif
// Compare to expected results
master.foreach (
[&expectedIntrinsicVolume, &expectedDependentVolume](
vtkm::worklet::contourtree_distributed::HyperSweepBlock<ContourTreeDataFieldType>* b,
const vtkmdiy::Master::ProxyWithLink&) {
#ifdef DEBUG_PRINT
vtkm::worklet::contourtree_augmented::PrintIndices(
"Intrinsic Volume", b->IntrinsicVolume, -1, std::cout);
vtkm::worklet::contourtree_augmented::PrintIndices(
"Expected Intrinsic Volume", expectedIntrinsicVolume[b->GlobalBlockId], -1, std::cout);
vtkm::worklet::contourtree_augmented::PrintIndices(
"Dependent Volume", b->DependentVolume, -1, std::cout);
vtkm::worklet::contourtree_augmented::PrintIndices(
"Expected Dependent Volume", expectedDependentVolume[b->GlobalBlockId], -1, std::cout);
#endif
VTKM_TEST_ASSERT(test_equal_portals(expectedIntrinsicVolume[b->GlobalBlockId].ReadPortal(),
b->IntrinsicVolume.ReadPortal()));
VTKM_TEST_ASSERT(test_equal_portals(expectedDependentVolume[b->GlobalBlockId].ReadPortal(),
b->DependentVolume.ReadPortal()));
});
// Clean-up
for (auto b : localHyperSweeperBlocks)
{
delete b;
}
}
void TestContourTreeUniformDistributed()
{
using vtkm::cont::testing::Testing;
@ -92,6 +395,8 @@ void TestContourTreeUniformDistributed()
Testing::DataPath("misc/5x6_7_MC_Rank0_Block0_Round1_BeforeCombineMesh1.ctm"),
Testing::DataPath("misc/5x6_7_MC_Rank0_Block0_Round1_BeforeCombineMesh2.ctm"),
Testing::RegressionImagePath("5x6_7_MC_Rank0_Block0_Round1_CombinedMesh.ctm"));
TestHierarchicalHyperSweeper();
}
} // anonymous namespace