Refactoring.

This commit is contained in:
Petar Hritov 2020-07-03 18:28:21 +01:00
parent edd2585752
commit 9e8784a780
12 changed files with 430 additions and 1501 deletions

@ -84,15 +84,8 @@
#include <vtkm/cont/Invoker.h>
#include <vtkm/worklet/contourtree_augmented/processcontourtree/AddDependentWeightHypersweep.h>
#include <vtkm/worklet/contourtree_augmented/processcontourtree/ComputeBestUpDown.h>
#include <vtkm/worklet/contourtree_augmented/processcontourtree/ComputeEulerTourFirstNext.h>
#include <vtkm/worklet/contourtree_augmented/processcontourtree/ComputeEulerTourList.h>
#include <vtkm/worklet/contourtree_augmented/processcontourtree/ComputeMinMaxValues.h>
#include <vtkm/worklet/contourtree_augmented/processcontourtree/EulerTour.h>
#include <vtkm/worklet/contourtree_augmented/processcontourtree/HypwersweepWorklets.h>
#include <vtkm/worklet/contourtree_augmented/processcontourtree/PointerDoubling.h>
#include <vtkm/worklet/contourtree_augmented/processcontourtree/PrefixScanHyperarcs.h>
#include <vtkm/worklet/contourtree_augmented/processcontourtree/SweepHyperarcs.h>
//#define DEBUG_PRINT
@ -107,6 +100,7 @@ namespace worklet
namespace contourtree_augmented
{
// TODO Many of the post processing routines still need to be parallelized
// Class with routines for post processing the contour tree
class ProcessContourTree
@ -221,12 +215,12 @@ public:
} // CollectSortedSuperarcs()
// routine to compute the volume for each hyperarc and superarc
void static ComputeVolumeWeights(const ContourTree& contourTree,
const vtkm::Id nIterations,
IdArrayType& superarcIntrinsicWeight,
IdArrayType& superarcDependentWeight,
IdArrayType& supernodeTransferWeight,
IdArrayType& hyperarcDependentWeight)
void static ComputeVolumeWeightsSerial(const ContourTree& contourTree,
const vtkm::Id nIterations,
IdArrayType& superarcIntrinsicWeight,
IdArrayType& superarcDependentWeight,
IdArrayType& supernodeTransferWeight,
IdArrayType& hyperarcDependentWeight)
{ // ContourTreeMaker::ComputeWeights()
// start by storing the first sorted vertex ID for each superarc
IdArrayType firstVertexForSuperparent;
@ -409,14 +403,14 @@ public:
} // ContourTreeMaker::ComputeWeights()
// routine to compute the branch decomposition by volume
void static ComputeVolumeBranchDecomposition(const ContourTree& contourTree,
const IdArrayType& superarcDependentWeight,
const IdArrayType& superarcIntrinsicWeight,
IdArrayType& whichBranch,
IdArrayType& branchMinimum,
IdArrayType& branchMaximum,
IdArrayType& branchSaddle,
IdArrayType& branchParent)
void static ComputeVolumeBranchDecompositionSerial(const ContourTree& contourTree,
const IdArrayType& superarcDependentWeight,
const IdArrayType& superarcIntrinsicWeight,
IdArrayType& whichBranch,
IdArrayType& branchMinimum,
IdArrayType& branchMaximum,
IdArrayType& branchSaddle,
IdArrayType& branchParent)
{ // ComputeVolumeBranchDecomposition()
//auto superarcsPortal = contourTree.Superarcs.ReadPortal();
//auto superarcDependentWeightPortal = superarcDependentWeight.ReadPortal();
@ -587,9 +581,6 @@ public:
vtkm::cont::ArrayHandleConstant<vtkm::Id>((vtkm::Id)NO_SUCH_ELEMENT, nSupernodes);
vtkm::cont::ArrayCopy(noSuchElementArray, whichBranch);
vtkm::cont::Timer timer;
timer.Start();
// STAGE III: For each vertex, identify which neighbours are on same branch
// Let v = BestUp(u). Then if u = BestDown(v), copy BestUp(u) to whichBranch(u)
// Otherwise, let whichBranch(u) = BestUp(u) | TERMINAL to mark the end of the side branch
@ -600,21 +591,12 @@ public:
propagateBestUpDownWorklet;
invoke(propagateBestUpDownWorklet, bestUpward, bestDownward, whichBranch);
timer.Stop();
if (printTime >= 2)
{
//std::cout << "----------------//---------------- Propagating Branches took " << timer.GetElapsedTime() << " seconds." << std::endl;
printf("%.6f for Propagating Branches took.\n", timer.GetElapsedTime());
}
#ifdef DEBUG_PRINT
std::cout << "III. Branch Neighbours Identified" << std::endl;
PrintHeader(whichBranch.GetNumberOfValues());
PrintIndices("Which Branch", whichBranch);
std::cout << std::endl;
#endif
timer.Reset();
timer.Start();
// STAGE IV: Use pointer-doubling on whichBranch to propagate branches
// Compute the number of log steps required in this pass
@ -632,13 +614,6 @@ public:
} // per iteration
timer.Stop();
if (printTime >= 2)
{
//std::cout << "----------------//---------------- Branch Point Doubling " << timer.GetElapsedTime() << " seconds." << std::endl;
printf("%.6f for Branch Point Doubling.\n", timer.GetElapsedTime());
}
#ifdef DEBUG_PRINT
std::cout << "IV. Branch Chains Propagated" << std::endl;
PrintHeader(whichBranch.GetNumberOfValues());
@ -646,9 +621,6 @@ public:
std::cout << std::endl;
#endif
timer.Reset();
timer.Start();
// Initialise
IdArrayType chainToBranch;
vtkm::cont::ArrayCopy(vtkm::cont::ArrayHandleConstant<vtkm::Id>(0, nSupernodes), chainToBranch);
@ -667,16 +639,6 @@ public:
finaliseChainToBranchWorklet;
invoke(finaliseChainToBranchWorklet, whichBranch, chainToBranch);
timer.Stop();
if (printTime >= 2)
{
//std::cout << "----------------//---------------- Create Chain to Branch " << timer.GetElapsedTime() << " seconds." << std::endl;
printf("%.6f for Create Chain to Branch.\n", timer.GetElapsedTime());
}
timer.Reset();
timer.Start();
// V B. Create the arrays for the branches
auto noSuchElementArrayNBranches =
vtkm::cont::ArrayHandleConstant<vtkm::Id>((vtkm::Id)NO_SUCH_ELEMENT, nBranches);
@ -685,13 +647,6 @@ public:
vtkm::cont::ArrayCopy(noSuchElementArrayNBranches, branchSaddle);
vtkm::cont::ArrayCopy(noSuchElementArrayNBranches, branchParent);
timer.Stop();
if (printTime >= 2)
{
//std::cout << "----------------//---------------- Array Coppying " << timer.GetElapsedTime() << " seconds." << std::endl;
printf("%.6f for Array Coppying.\n", timer.GetElapsedTime());
}
#ifdef DEBUG_PRINT
std::cout << "V. Branch Arrays Created" << std::endl;
PrintHeader(chainToBranch.GetNumberOfValues());
@ -703,36 +658,14 @@ public:
PrintIndices("Branch Parent", branchParent);
#endif
timer.Reset();
timer.Start();
IdArrayType supernodeSorter;
vtkm::cont::ArrayCopy(vtkm::cont::ArrayHandleCounting<vtkm::Id>(0, 1, nSupernodes),
supernodeSorter);
timer.Stop();
if (printTime >= 2)
{
//std::cout << "----------------//---------------- Supernode Sorter " << timer.GetElapsedTime() << " seconds." << std::endl;
printf("%.6f for Supernode Sorter.\n", timer.GetElapsedTime());
}
timer.Reset();
timer.Start();
vtkm::cont::Algorithm::Sort(
supernodeSorter,
process_contourtree_inc_ns::SuperNodeBranchComparator(whichBranch, contourTree.Supernodes));
timer.Stop();
if (printTime >= 2)
{
//std::cout << "----------------//---------------- VTKM Sorting " << timer.GetElapsedTime() << " seconds." << std::endl;
printf("%.6f for VTKM Sorting.\n", timer.GetElapsedTime());
}
timer.Reset();
timer.Start();
IdArrayType permutedBranches;
permutedBranches.Allocate(nSupernodes);
PermuteArray<vtkm::Id>(whichBranch, supernodeSorter, permutedBranches);
@ -741,13 +674,6 @@ public:
permutedRegularID.Allocate(nSupernodes);
PermuteArray<vtkm::Id>(contourTree.Supernodes, supernodeSorter, permutedRegularID);
timer.Stop();
if (printTime >= 2)
{
//std::cout << "----------------//---------------- Array Permuting " << timer.GetElapsedTime() << " seconds." << std::endl;
printf("%.6f for Array Permuting.\n", timer.GetElapsedTime());
}
#ifdef DEBUG_PRINT
std::cout << "VI A. Sorted into Branches" << std::endl;
PrintHeader(nSupernodes);
@ -756,34 +682,14 @@ public:
PrintIndices("Regular ID", permutedRegularID);
#endif
timer.Reset();
timer.Start();
vtkm::worklet::contourtree_augmented::process_contourtree_inc::WhichBranchNewId
whichBranchNewIdWorklet;
invoke(whichBranchNewIdWorklet, chainToBranch, whichBranch);
timer.Stop();
if (printTime >= 2)
{
//std::cout << "----------------//---------------- Which Branch Initialisation " << timer.GetElapsedTime() << " seconds." << std::endl;
printf("%.6f for Which Branch Initialisation.\n", timer.GetElapsedTime());
}
timer.Reset();
timer.Start();
vtkm::worklet::contourtree_augmented::process_contourtree_inc::BranchMinMaxSet
branchMinMaxSetWorklet(nSupernodes);
invoke(branchMinMaxSetWorklet, supernodeSorter, whichBranch, branchMinimum, branchMaximum);
timer.Stop();
if (printTime >= 2)
{
//std::cout << "----------------//---------------- Branch min/max setting " << timer.GetElapsedTime() << " seconds." << std::endl;
printf("%.6f for Branch min/max setting.\n", timer.GetElapsedTime());
}
#ifdef DEBUG_PRINT
std::cout << "VI. Branches Set" << std::endl;
PrintHeader(nBranches);
@ -793,9 +699,6 @@ public:
PrintIndices("Branch Parent", branchParent);
#endif
//timer.Reset();
//timer.Start();
vtkm::worklet::contourtree_augmented::process_contourtree_inc::BranchSaddleParentSet
branchSaddleParentSetWorklet;
invoke(branchSaddleParentSetWorklet,
@ -807,16 +710,6 @@ public:
branchSaddle,
branchParent);
//return ;
//timer.Stop();
//if (printTime >= 2)
//{
////std::cout << "----------------//---------------- Branch parents & Saddles setting " << timer.GetElapsedTime() << " seconds." << std::endl;
//printf("%.6f for Branch parents & Saddles setting.\n", timer.GetElapsedTime());
//}
#ifdef DEBUG_PRINT
std::cout << "VII. Branches Constructed" << std::endl;
PrintHeader(nBranches);
@ -856,90 +749,66 @@ public:
}
// routine to compute the branch decomposition by volume
void static ComputeVolumeBranchDecompositionNew(const ContourTree& contourTree,
const cont::ArrayHandle<Float64> fieldValues,
const IdArrayType& ctSortOrder,
const int printTime,
const vtkm::Id nIterations,
IdArrayType& whichBranch,
IdArrayType& branchMinimum,
IdArrayType& branchMaximum,
IdArrayType& branchSaddle,
IdArrayType& branchParent)
void static ComputeVolumeBranchDecomposition(const ContourTree& contourTree,
const cont::ArrayHandle<Float64> fieldValues,
const IdArrayType& ctSortOrder,
const int printTime,
const vtkm::Id nIterations,
IdArrayType& whichBranch,
IdArrayType& branchMinimum,
IdArrayType& branchMaximum,
IdArrayType& branchSaddle,
IdArrayType& branchParent)
{ // ComputeHeightBranchDecomposition()
// STEP 1. Compute the number of nodes in every superarc
vtkm::cont::Invoker Invoke;
// STEP 1. Compute the number of nodes in every superarc, that's the intrinsic weight
IdArrayType superarcIntrinsicWeight;
superarcIntrinsicWeight.Allocate(contourTree.Superarcs.GetNumberOfValues());
IdArrayType firstVertexForSuperparent;
firstVertexForSuperparent.Allocate(contourTree.Superarcs.GetNumberOfValues());
auto nodesPortal = contourTree.Nodes.ReadPortal();
auto superparentsPortal = contourTree.Superparents.ReadPortal();
auto superarcIntrinsicWeightPortal = superarcIntrinsicWeight.WritePortal();
auto firstVertexForSuperparentPortal = firstVertexForSuperparent.WritePortal();
// Compute the number of regular nodes on every superarcs (the intrinsic weight)
vtkm::worklet::contourtree_augmented::process_contourtree_inc::SetFirstVertexForSuperparent
setFirstVertexForSuperparent;
Invoke(setFirstVertexForSuperparent,
contourTree.Nodes,
contourTree.Superparents,
firstVertexForSuperparent);
for (vtkm::Id sortedNode = 0; sortedNode < contourTree.Arcs.GetNumberOfValues(); sortedNode++)
{ // per node in sorted order
vtkm::Id sortID = nodesPortal.Get(sortedNode);
vtkm::Id superparent = superparentsPortal.Get(sortID);
if (sortedNode == 0)
{
firstVertexForSuperparentPortal.Set(superparent, sortedNode);
}
else if (superparent != superparentsPortal.Get(nodesPortal.Get(sortedNode - 1)))
{
firstVertexForSuperparentPortal.Set(superparent, sortedNode);
}
} // per node in sorted order
vtkm::worklet::contourtree_augmented::process_contourtree_inc::ComputeIntrinsicWeight
computeIntrinsicWeight;
Invoke(computeIntrinsicWeight,
contourTree.Arcs,
contourTree.Superarcs,
firstVertexForSuperparent,
superarcIntrinsicWeight);
// Compute the number of regular nodes on every superarc
for (vtkm::Id superarc = 0; superarc < contourTree.Superarcs.GetNumberOfValues(); superarc++)
{
if (superarc == contourTree.Superarcs.GetNumberOfValues() - 1)
{
superarcIntrinsicWeightPortal.Set(superarc,
contourTree.Arcs.GetNumberOfValues() -
firstVertexForSuperparentPortal.Get(superarc));
}
else
{
superarcIntrinsicWeightPortal.Set(superarc,
firstVertexForSuperparentPortal.Get(superarc + 1) -
firstVertexForSuperparentPortal.Get(superarc));
}
}
//// Cache the number of non-root supernodes & superarcs
// Cache the number of non-root supernodes & superarcs
vtkm::Id nSupernodes = contourTree.Supernodes.GetNumberOfValues();
auto noSuchElementArray =
vtkm::cont::ArrayHandleConstant<vtkm::Id>((vtkm::Id)NO_SUCH_ELEMENT, nSupernodes);
//// Set up bestUpward and bestDownward array, these are the things we want to compute in this routine.
// Set up bestUpward and bestDownward array, these are the things we want to compute in this routine.
IdArrayType bestUpward, bestDownward;
vtkm::cont::ArrayCopy(noSuchElementArray, bestUpward);
vtkm::cont::ArrayCopy(noSuchElementArray, bestDownward);
//// We initialise with the weight of the superarcs, once we sum those up we'll get the hypersweep weight
// We initialise with the weight of the superarcs, once we sum those up we'll get the hypersweep weight
IdArrayType sumValues;
vtkm::cont::ArrayCopy(superarcIntrinsicWeight, sumValues);
//// This should be 0 here, because we're not changing the root
// This should be 0 here, because we're not changing the root
vtkm::cont::ArrayHandle<vtkm::Id> howManyUsed;
vtkm::cont::ArrayCopy(
vtkm::cont::ArrayHandleConstant<vtkm::Id>(0, contourTree.Hyperarcs.GetNumberOfValues()),
howManyUsed);
////
//// Min Hypersweep
////
const auto sumOperator = vtkm::Sum();
vtkm::cont::Timer hypersweepTimer;
hypersweepTimer.Start();
// Perform a sum hypersweep
hyperarcScan<decltype(vtkm::Sum())>(contourTree.Supernodes,
contourTree.Hypernodes,
contourTree.Hyperarcs,
@ -951,59 +820,21 @@ public:
vtkm::Sum(),
sumValues);
// For every directed arc store the volume of its associated subtree
vtkm::cont::ArrayHandle<vtkm::worklet::contourtree_augmented::EdgeDataVolume> arcs;
arcs.Allocate(contourTree.Superarcs.GetNumberOfValues() * 2 - 2);
vtkm::Id totalVolume = contourTree.Nodes.GetNumberOfValues();
vtkm::worklet::contourtree_augmented::process_contourtree_inc::InitialiseArcsVolume initArcs(
totalVolume);
Invoke(initArcs, sumValues, superarcIntrinsicWeight, contourTree.Superarcs, arcs);
for (int i = 0; i < contourTree.Supernodes.GetNumberOfValues(); i++)
{
Id parent = MaskedIndex(contourTree.Superarcs.ReadPortal().Get(i));
if (parent == 0)
continue;
EdgeDataVolume edge;
edge.i = i;
edge.j = parent;
edge.upEdge = IsAscending((contourTree.Superarcs.ReadPortal().Get(i)));
//edge.subtreeVolume = sumValues.ReadPortal().Get(i);
edge.subtreeVolume = (totalVolume - sumValues.ReadPortal().Get(i)) +
(superarcIntrinsicWeight.ReadPortal().Get(i) - 1);
EdgeDataVolume oppositeEdge;
oppositeEdge.i = parent;
oppositeEdge.j = i;
oppositeEdge.upEdge = !edge.upEdge;
//oppositeEdge.subtreeVolume = (totalVolume - sumValues.ReadPortal().Get(i)) + (superarcIntrinsicWeight.ReadPortal().Get(i) - 1);
oppositeEdge.subtreeVolume = sumValues.ReadPortal().Get(i);
//std::cout << "The edge " << edge.i << ", " << edge.j << " has sum value " << edge.subtreeVolume << std::endl;
//std::cout << "The edge " << oppositeEdge.i << ", " << oppositeEdge.j << " has sum value " << oppositeEdge.subtreeVolume << std::endl;
arcs.WritePortal().Set(i * 2, edge);
arcs.WritePortal().Set(i * 2 + 1, oppositeEdge);
}
// Sort arcs to obtain the best up and down
vtkm::cont::Algorithm::Sort(arcs, vtkm::SortLess());
vtkm::cont::Invoker Invoke;
vtkm::worklet::contourtree_augmented::process_contourtree_inc::SetBestUpDown setBestUpDown;
Invoke(setBestUpDown, bestUpward, bestDownward, arcs);
for (int i = 0; i < arcs.GetNumberOfValues(); i++)
{
//printf("Arc %d has i = %lld, j = %lld, upEdge = %d and subtreeVolume = %lld \n", i, arcs.ReadPortal().Get(i).i, arcs.ReadPortal().Get(i).j, arcs.ReadPortal().Get(i).upEdge, arcs.ReadPortal().Get(i).subtreeVolume);
//cout << "Arc " << i << ""
}
for (int i = 0; i < bestUpward.GetNumberOfValues(); i++)
{
//std::cout << i << " Up - " << bestUpward.ReadPortal().Get(i) << std::endl;
//std::cout << i << " Down - " << bestDownward.ReadPortal().Get(i) << std::endl;
}
ProcessContourTree::ComputeBranchData(contourTree,
printTime,
whichBranch,
@ -1029,19 +860,6 @@ public:
IdArrayType& branchParent)
{ // ComputeHeightBranchDecomposition()
vtkm::cont::Timer timerTotal;
timerTotal.Start();
vtkm::cont::Timer timerTotalAll;
timerTotalAll.Start();
vtkm::cont::Timer timer;
timer.Start();
double minHypersweepTime = 0.0;
double maxHypersweepTime = 0.0;
double bothHypersweepTime = 0.0;
// Cache the number of non-root supernodes & superarcs
vtkm::Id nSupernodes = contourTree.Supernodes.GetNumberOfValues();
auto noSuchElementArray =
@ -1067,33 +885,12 @@ public:
Id maxSuperNode = MaskedIndex(
contourTree.Superparents.ReadPortal().Get(contourTree.Nodes.GetNumberOfValues() - 1));
timer.Stop();
vtkm::Float64 ellapsedTime = timer.GetElapsedTime();
if (printTime >= 2)
{
//std::cout << "---------------- Initialising Array too " << ellapsedTime << " seconds." << std::endl;
printf("%.6f for Initialising Array.\n", timer.GetElapsedTime());
}
timer.Reset();
timer.Start();
// Find the path from the global minimum to the root, not parallelisable (but it's fast, no need to parallelise)
auto minPath = findSuperPathToRoot(contourTree.Superarcs.ReadPortal(), minSuperNode);
// Find the path from the global maximum to the root, not parallelisable (but it's fast, no need to parallelise)
auto maxPath = findSuperPathToRoot(contourTree.Superarcs.ReadPortal(), maxSuperNode);
timer.Stop();
if (printTime >= 2)
{
//std::cout << "---------------- Finding min/max paths to the root took \t\t" << timer.GetElapsedTime() << " seconds." << std::endl;
printf("%.6f for Finding min/max paths to the root.\n", timer.GetElapsedTime());
}
timer.Reset();
timer.Start();
// Reverse the direction of the superarcs on the min path.
for (uint i = 1; i < minPath.size(); i++)
{
@ -1108,19 +905,8 @@ public:
}
maxParents.WritePortal().Set(maxPath[0], 0);
timer.Stop();
if (printTime >= 2)
{
//std::cout << "---------------- Rerooting took " << timer.GetElapsedTime() << " seconds." << std::endl;
printf("%.6f for Rerooting.\n", timer.GetElapsedTime());
}
timer.Reset();
timer.Start();
vtkm::cont::Invoker Invoke;
vtkm::worklet::contourtree_augmented::process_contourtree_inc::UnmaskArray unmaskArrayWorklet;
Invoke(unmaskArrayWorklet, minValues);
Invoke(unmaskArrayWorklet, maxValues);
@ -1129,6 +915,7 @@ public:
vtkm::cont::ArrayCopy(contourTree.Hyperarcs, minHyperarcs);
vtkm::cont::ArrayCopy(contourTree.Hyperarcs, maxHyperarcs);
// These arrays hold the changed hyperarcs for the min and max hypersweep
vtkm::cont::ArrayHandle<vtkm::Id> minHyperparents, maxHyperparents;
vtkm::cont::ArrayCopy(contourTree.Hyperparents, minHyperparents);
vtkm::cont::ArrayCopy(contourTree.Hyperparents, maxHyperparents);
@ -1156,34 +943,16 @@ public:
vtkm::cont::ArrayHandleConstant<vtkm::Id>(0, maxHyperarcs.GetNumberOfValues()),
maxHowManyUsed);
timer.Stop();
if (printTime >= 2)
{
//std::cout << "---------------- Initialising more stuff took " << timer.GetElapsedTime() << " seconds." << std::endl;
printf("%.6f for Initialising more stuff.\n", timer.GetElapsedTime());
}
// Total HS Timer
timer.Reset();
timer.Start();
//timer.Reset();
//timer.Start();
//
// Min Hypersweep
//
const auto minOperator = vtkm::Minimum();
// Cut hyperarcs at the first node on the path from the min to the root
editHyperarcs(contourTree.Hyperparents.ReadPortal(),
minPath,
minHyperarcs.WritePortal(),
minHowManyUsed.WritePortal());
vtkm::cont::Timer hypersweepTimer;
hypersweepTimer.Start();
// Perform an ordinary hypersweep on those new hyperarcs
hyperarcScan<decltype(vtkm::Minimum())>(contourTree.Supernodes,
contourTree.Hypernodes,
minHyperarcs,
@ -1195,29 +964,19 @@ public:
vtkm::Minimum(),
minValues);
hypersweepTimer.Stop();
minHypersweepTime = hypersweepTimer.GetElapsedTime();
if (printTime >= 1)
{
//std::cout << "---------------- Initialising more stuff took " << timer.GetElapsedTime() << " seconds." << std::endl;
printf("%.6f for the Min Hypersweep.\n", hypersweepTimer.GetElapsedTime());
}
hypersweepTimer.Reset();
hypersweepTimer.Start();
// Prefix sum along the path from the min to the root
fixPath(vtkm::Minimum(), minPath, minValues.WritePortal());
//
// Max Hypersweep
//
const auto maxOperator = vtkm::Maximum();
// Cut hyperarcs at the first node on the path from the max to the root
editHyperarcs(contourTree.Hyperparents.ReadPortal(),
maxPath,
maxHyperarcs.WritePortal(),
maxHowManyUsed.WritePortal());
// Perform an ordinary hypersweep on those new hyperarcs
hyperarcScan<decltype(vtkm::Maximum())>(contourTree.Supernodes,
contourTree.Hypernodes,
maxHyperarcs,
@ -1229,26 +988,11 @@ public:
vtkm::Maximum(),
maxValues);
// Prefix sum along the path from the max to the root
fixPath(vtkm::Maximum(), maxPath, maxValues.WritePortal());
timer.Stop();
hypersweepTimer.Stop();
maxHypersweepTime = hypersweepTimer.GetElapsedTime();
bothHypersweepTime = timer.GetElapsedTime();
if (printTime >= 1)
{
//std::cout << "---------------- HS TOTAL ---------------- Total Hypersweep took " << timer.GetElapsedTime() << " seconds." << std::endl;
printf("%.6f for Max Hypersweep.\n", hypersweepTimer.GetElapsedTime());
printf("%.6f for BOTH Hypersweeps.\n", timer.GetElapsedTime());
}
timer.Reset();
timer.Start();
IdArrayType maxValuesCopy, minValuesCopy;
vtkm::cont::ArrayCopy(maxValues, maxValuesCopy);
vtkm::cont::ArrayCopy(minValues, minValuesCopy);
// For every directed edge (a, b) consider the subtree whose root is b and which does not contain a.
// We have so far found the min and max in all sub subtrees, now we compare those to a and incorporate a into that.
vtkm::worklet::contourtree_augmented::process_contourtree_inc::IncorporateParent<decltype(
vtkm::Minimum())>
incorporateParentMinimumWorklet(minOperator);
@ -1259,16 +1003,7 @@ public:
incorporateParentMaximumWorklet(maxOperator);
Invoke(incorporateParentMaximumWorklet, maxParents, contourTree.Supernodes, maxValues);
timer.Stop();
if (printTime >= 2)
{
//std::cout << "---------------- Incorporating Parent took " << timer.GetElapsedTime() << " seconds." << std::endl;
printf("%.6f for Incorporating Parent.\n", timer.GetElapsedTime());
}
timer.Reset();
timer.Start();
// Initialise all directed superarcs in the contour tree. Those will correspond to subtrees whose height we need for the branch decomposition.
vtkm::cont::ArrayHandle<vtkm::worklet::contourtree_augmented::EdgeData> arcs;
arcs.Allocate(contourTree.Superarcs.GetNumberOfValues() * 2 - 2);
@ -1277,73 +1012,19 @@ public:
Invoke(initArcs, minParents, maxParents, minValues, maxValues, contourTree.Superarcs, arcs);
//
// Set whether an arc is up or down arcs. Parallelisable. No HS.
//
timer.Stop();
if (printTime >= 2)
{
//std::cout << "---------------- Initialising arcs took " << timer.GetElapsedTime() << " seconds." << std::endl;
printf("%.6f for Initialising arcs.\n", timer.GetElapsedTime());
}
//
// Compute the height of all subtrees using the min and max
//
timer.Reset();
timer.Start();
// Use the min & max to compute the height of all subtrees
vtkm::worklet::contourtree_augmented::process_contourtree_inc::ComputeSubtreeHeight
computeSubtreeHeight;
Invoke(computeSubtreeHeight, fieldValues, ctSortOrder, contourTree.Supernodes, arcs);
timer.Stop();
if (printTime >= 2)
{
//std::cout << "---------------- Computing subtree height took " << timer.GetElapsedTime() << " seconds." << std::endl;
printf("%.6f for Computing subtree height.\n", timer.GetElapsedTime());
}
//
// Sort to find the bestUp and Down
//
timer.Reset();
timer.Start();
// Sort all directed edges based on the height of their subtree
vtkm::cont::Algorithm::Sort(arcs, vtkm::SortLess());
timer.Stop();
if (printTime >= 2)
{
//std::cout << "---------------- Sorting took " << timer.GetElapsedTime() << " seconds." << std::endl;
printf("%.6f for Sorting.\n", timer.GetElapsedTime());
}
//
// Set bestUp and bestDown. Parallelisable
//
timer.Reset();
timer.Start();
// Select a best up and best down neighbour for every vertex in the contour tree using heights of all subtrees
vtkm::worklet::contourtree_augmented::process_contourtree_inc::SetBestUpDown setBestUpDown;
Invoke(setBestUpDown, bestUpward, bestDownward, arcs);
timer.Stop();
timerTotal.Stop();
if (printTime >= 2)
{
//std::cout << "---------------- Setting bestUp/Down took " << timer.GetElapsedTime() << " seconds." << std::endl;
printf("%.6f for Setting bestUp/Down.\n", timer.GetElapsedTime());
//std::cout << "---------------- TOTAL TIME for 1st Part is " << timerTotal.GetElapsedTime() << " seconds." << std::endl;
printf("%.6f for TOTAL TIME for 1st Part.\n", timerTotal.GetElapsedTime());
printf("\n\n");
}
timer.Reset();
timer.Start();
// Having computed the bestUp/Down we can propagate those to obtain the branches of the branch decomposition
ProcessContourTree::ComputeBranchData(contourTree,
printTime,
whichBranch,
@ -1354,30 +1035,6 @@ public:
bestUpward,
bestDownward);
timer.Stop();
timerTotalAll.Stop();
ellapsedTime = timer.GetElapsedTime();
if (printTime >= 2)
{
printf("%.6f for Computing branch data.\n", timer.GetElapsedTime());
}
if (printTime >= 1)
{
printf("%.6f TOTAL Branch Decomposition.\n", timerTotalAll.GetElapsedTime());
}
if (printTime >= 0)
{
//printf("MinHypersweep, MaxHypersweep, BothHypersweep, TotalBD\n");
printf("%.8f, %.8f, %.8f, %.8f",
minHypersweepTime,
maxHypersweepTime,
bothHypersweepTime,
timerTotalAll.GetElapsedTime());
}
//printf("Working!");
} // ComputeHeightBranchDecomposition()
std::vector<Id> static findSuperPathToRoot(
@ -1464,32 +1121,23 @@ public:
{
using vtkm::worklet::contourtree_augmented::MaskedIndex;
vtkm::cont::Invoker invoke;
auto supernodesPortal = supernodes.ReadPortal();
auto hypernodesPortal = hypernodes.ReadPortal();
auto hyperarcsPortal = hyperarcs.ReadPortal();
auto hyperparentsPortal = hyperparents.ReadPortal();
auto whenTransferredPortal = whenTransferred.ReadPortal();
auto howManyUsedPortal = howManyUsed.ReadPortal();
// Set the first supernode per iteration
vtkm::cont::ArrayHandle<vtkm::Id> firstSupernodePerIteration;
vtkm::cont::ArrayCopy(vtkm::cont::ArrayHandleConstant<vtkm::Id>(0, nIterations + 1),
firstSupernodePerIteration);
auto firstSupernodePerIterationPortal = firstSupernodePerIteration.WritePortal();
// The first different from the previous is the first in the iteration
for (vtkm::Id supernode = 0; supernode < supernodesPortal.GetNumberOfValues(); supernode++)
{
vtkm::Id when = MaskedIndex(whenTransferredPortal.Get(supernode));
if (supernode == 0)
{
firstSupernodePerIterationPortal.Set(when, supernode);
}
else if (when != MaskedIndex(whenTransferredPortal.Get(supernode - 1)))
{
firstSupernodePerIterationPortal.Set(when, supernode);
}
}
vtkm::worklet::contourtree_augmented::process_contourtree_inc::SetFirstSupernodePerIteration
setFirstSupernodePerIteration;
invoke(setFirstSupernodePerIteration, whenTransferred, firstSupernodePerIteration);
auto firstSupernodePerIterationPortal = firstSupernodePerIteration.WritePortal();
// Why do we need this?
for (vtkm::Id iteration = 1; iteration < nIterations; ++iteration)
@ -1550,7 +1198,6 @@ public:
firstHypernode, 1, lastHypernode - firstHypernode);
// Transfer the value accumulated in the last entry of the prefix scan to the hypernode's targe supernode
vtkm::cont::Invoker invoke;
invoke(addDependentWeightHypersweepWorklet,
iterationHyperarcs,
hypernodes,

@ -151,9 +151,13 @@ inline std::string FlagString(vtkm::Id flaggedIndex)
class EdgeData
{
public:
// RegularNodeID (or sortIndex)
Id i;
// RegularNodeID (or sortIndex)
Id j;
// RegularNodeID (or sortIndex)
Id subtreeMin;
// RegularNodeID (or sortIndex)
Id subtreeMax;
bool upEdge;
Float64 subtreeHeight;

@ -73,6 +73,149 @@ namespace contourtree_augmented
{
namespace process_contourtree_inc
{
// Worklet that, for every non-root supernode, emits the two directed edges of
// its superarc (supernode -> parent and parent -> supernode), each annotated
// with the volume of the subtree hanging off that direction of the arc.
class InitialiseArcsVolume : public vtkm::worklet::WorkletMapField
{
public:
  typedef void ControlSignature(WholeArrayIn, WholeArrayIn, WholeArrayIn, WholeArrayInOut);
  typedef void ExecutionSignature(InputIndex, _1, _2, _3, _4);
  using InputDomain = _3;

  // Total number of regular nodes in the tree; used to derive the volume of
  // the complement subtree (everything NOT below the superarc).
  vtkm::Id totalVolume;

  VTKM_EXEC_CONT InitialiseArcsVolume(vtkm::Id _totalVolume)
    : totalVolume(_totalVolume)
  {
  }

  // hypersweepSumValuesPortal: per-supernode dependent weight from the sum hypersweep
  // superarcIntrinsicWeightPortal: number of regular nodes on each superarc
  // superarcsPortal: superarc targets (flagged); arcsPortal: output edge pairs
  template <typename IdWholeArrayInPortalType, typename EdgeWholeArrayInOutPortal>
  VTKM_EXEC void operator()(const vtkm::Id currentId,
                            const IdWholeArrayInPortalType& hypersweepSumValuesPortal,
                            const IdWholeArrayInPortalType& superarcIntrinsicWeightPortal,
                            const IdWholeArrayInPortalType& superarcsPortal,
                            const EdgeWholeArrayInOutPortal& arcsPortal) const
  {
    Id i = currentId;
    Id parent = MaskedIndex(superarcsPortal.Get(i));
    if (parent == 0)
    {
      // We expect the root to be the last vertex in the supernodes array.
      // NOTE(review): the original assert was `i != GetNumberOfValues() - 2`,
      // which contradicts that comment (it only rejects the arbitrary index
      // n - 2); assert the stated invariant instead — confirm against callers.
      assert(i == superarcsPortal.GetNumberOfValues() - 1);
      return;
    }
    EdgeDataVolume edge;
    edge.i = i;
    edge.j = parent;
    edge.upEdge = IsAscending((superarcsPortal.Get(i)));
    // Volume of the subtree on the far side of the arc: everything not counted
    // by the hypersweep below i, plus i's own intrinsic nodes (minus the shared
    // endpoint).
    edge.subtreeVolume =
      (totalVolume - hypersweepSumValuesPortal.Get(i)) + (superarcIntrinsicWeightPortal.Get(i) - 1);
    EdgeDataVolume oppositeEdge;
    oppositeEdge.i = parent;
    oppositeEdge.j = i;
    oppositeEdge.upEdge = !edge.upEdge;
    // The opposite direction sees exactly the hypersweep-accumulated subtree.
    oppositeEdge.subtreeVolume = hypersweepSumValuesPortal.Get(i);
    arcsPortal.Set(i * 2, edge);
    arcsPortal.Set(i * 2 + 1, oppositeEdge);
  }
};
// Worklet recording, for each superparent, the position of its first regular
// vertex in the sorted node order. A write happens at every boundary where
// the superparent changes along the sorted list (and at the very first node).
class SetFirstVertexForSuperparent : public vtkm::worklet::WorkletMapField
{
public:
  typedef void ControlSignature(WholeArrayIn, WholeArrayIn, WholeArrayInOut);
  typedef void ExecutionSignature(InputIndex, _1, _2, _3);
  using InputDomain = _1;

  VTKM_EXEC_CONT SetFirstVertexForSuperparent() {}

  template <typename IdWholeArrayInPortalType, typename IdWholeArrayInOutPortalType>
  VTKM_EXEC void operator()(
    const vtkm::Id sortedNode,
    const IdWholeArrayInPortalType& nodesPortal,
    const IdWholeArrayInPortalType& superparentsPortal,
    const IdWholeArrayInOutPortalType& firstVertexForSuperparentPortal) const
  {
    const vtkm::Id sortID = nodesPortal.Get(sortedNode);
    const vtkm::Id superparent = superparentsPortal.Get(sortID);
    // The first sorted node, and any node whose superparent differs from its
    // predecessor's, starts a new run of vertices for that superparent.
    // Short-circuit keeps us from reading index -1 at sortedNode == 0.
    const bool startsNewRun = (sortedNode == 0) ||
      (superparent != superparentsPortal.Get(nodesPortal.Get(sortedNode - 1)));
    if (startsNewRun)
    {
      firstVertexForSuperparentPortal.Set(superparent, sortedNode);
    }
  }
};
// Worklet computing the intrinsic weight (count of regular nodes) of every
// superarc as the difference between consecutive first-vertex offsets; the
// final superarc extends to the end of the arcs array.
class ComputeIntrinsicWeight : public vtkm::worklet::WorkletMapField
{
public:
  typedef void ControlSignature(WholeArrayIn, WholeArrayIn, WholeArrayIn, WholeArrayInOut);
  typedef void ExecutionSignature(InputIndex, _1, _2, _3, _4);
  using InputDomain = _2;

  VTKM_EXEC_CONT ComputeIntrinsicWeight() {}

  template <typename IdWholeArrayInPortalType, typename IdWholeArrayInOutPortalType>
  VTKM_EXEC void operator()(const vtkm::Id superarc,
                            const IdWholeArrayInPortalType& arcsPortal,
                            const IdWholeArrayInPortalType& superarcsPortal,
                            const IdWholeArrayInPortalType& firstVertexForSuperparentPortal,
                            const IdWholeArrayInOutPortalType& superarcIntrinsicWeightPortal) const
  {
    // One past the last sorted vertex belonging to this superarc: the next
    // superarc's first vertex, or the array end for the final superarc.
    const bool isLastSuperarc = (superarc == superarcsPortal.GetNumberOfValues() - 1);
    const vtkm::Id rangeEnd = isLastSuperarc
      ? arcsPortal.GetNumberOfValues()
      : firstVertexForSuperparentPortal.Get(superarc + 1);
    superarcIntrinsicWeightPortal.Set(
      superarc, rangeEnd - firstVertexForSuperparentPortal.Get(superarc));
  }
};
// Worklet recording the first supernode of each hypersweep iteration: a
// supernode starts an iteration when its (masked) transfer iteration differs
// from its predecessor's.
class SetFirstSupernodePerIteration : public vtkm::worklet::WorkletMapField
{
public:
  typedef void ControlSignature(WholeArrayIn, WholeArrayInOut);
  typedef void ExecutionSignature(InputIndex, _1, _2);
  using InputDomain = _1;

  VTKM_EXEC_CONT SetFirstSupernodePerIteration() {}

  template <typename IdWholeArrayInPortalType, typename IdWholeArrayInOutPortalType>
  VTKM_EXEC void operator()(
    const vtkm::Id supernode,
    const IdWholeArrayInPortalType& whenTransferredPortal,
    const IdWholeArrayInOutPortalType& firstSupernodePerIterationPortal) const
  {
    const vtkm::Id when = MaskedIndex(whenTransferredPortal.Get(supernode));
    // Short-circuit avoids reading index -1 for the very first supernode.
    const bool beginsIteration = (supernode == 0) ||
      (when != MaskedIndex(whenTransferredPortal.Get(supernode - 1)));
    if (beginsIteration)
    {
      firstSupernodePerIterationPortal.Set(when, supernode);
    }
  }
};
template <typename Operator>
class AddDependentWeightHypersweep : public vtkm::worklet::WorkletMapField
{

@ -13,17 +13,9 @@ set(headers
PiecewiseLinearFunction.h
SuperArcVolumetricComparator.h
SuperNodeBranchComparator.h
ComputeBestUpDown.h
ComputeEulerTourFirstNext.h
ComputeEulerTourList.h
ComputeMinMaxValues.h
SetTriangleSuperarcId.h
EulerTour.h
PointerDoubling.h
SweepHyperarcs.h
PrefixScanHyperarcs.h
AddDependentWeightHypersweep.h
HypwersweepWorklets.h
HypersweepWorklets.h
)
#-----------------------------------------------------------------------------

@ -1,218 +0,0 @@
//============================================================================
// Copyright (c) Kitware, Inc.
// All rights reserved.
// See LICENSE.txt for details.
//
// This software is distributed WITHOUT ANY WARRANTY; without even
// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the above copyright notice for more information.
//============================================================================
// Copyright (c) 2018, The Regents of the University of California, through
// Lawrence Berkeley National Laboratory (subject to receipt of any required approvals
// from the U.S. Dept. of Energy). All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// (1) Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// (2) Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// (3) Neither the name of the University of California, Lawrence Berkeley National
// Laboratory, U.S. Dept. of Energy nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
// IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
//
//=============================================================================
//
// This code is an extension of the algorithm presented in the paper:
// Parallel Peak Pruning for Scalable SMP Contour Tree Computation.
// Hamish Carr, Gunther Weber, Christopher Sewell, and James Ahrens.
// Proceedings of the IEEE Symposium on Large Data Analysis and Visualization
// (LDAV), October 2016, Baltimore, Maryland.
//
// The PPP2 algorithm and software were jointly developed by
// Hamish Carr (University of Leeds), Gunther H. Weber (LBNL), and
// Oliver Ruebel (LBNL)
//==============================================================================
#ifndef vtk_m_worklet_contourtree_augmented_process_contourtree_inc_compute_best_up_down_h
#define vtk_m_worklet_contourtree_augmented_process_contourtree_inc_compute_best_up_down_h
#include <vtkm/worklet/WorkletMapField.h>
#include <vtkm/worklet/contourtree_augmented/Types.h>
/*
* This code is written by Petar Hristov in 09.2019
*
* This worklet is part of the Contour Tree Height Based Simplification Code.
* It selects the bestUp and the bestDown for every supernode in the contour tree.
* The best up and best down are used to construct the branches.
*
 * This worklet receives a 1D array of edges, sorted by their first vertex and then by their second vertex.
* Each invocation of the worklet goes through the neighbours of one vertex and looks for the bestUp and bestDown.
*
*/
namespace vtkm
{
namespace worklet
{
namespace contourtree_augmented
{
namespace process_contourtree_inc
{
// Worklet that, for every supernode i, selects the neighbour whose subtree has
// the greatest height difference, separately for upward edges (bestUp) and
// downward edges (bestDown). These pairs are later used to build the branch
// decomposition of the contour tree.
class ComputeBestUpDown : public vtkm::worklet::WorkletMapField
{
public:
  typedef void ControlSignature(WholeArrayIn _1,
                                WholeArrayIn _2,
                                WholeArrayIn _3,
                                WholeArrayIn _4,
                                WholeArrayIn _5,
                                WholeArrayIn _6,
                                WholeArrayIn _7,
                                WholeArrayIn _8,
                                WholeArrayIn _9,
                                WholeArrayIn _10,
                                WholeArrayOut _11,
                                WholeArrayOut _12);
  typedef void ExecutionSignature(InputIndex, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12);
  using InputDomain = _3;
  // Default Constructor
  VTKM_EXEC_CONT ComputeBestUpDown() {}
  template <typename FirstArrayPortalType,
            typename NodesArrayPortalType,
            typename SupernodesArrayPortalType,
            typename MinValuesArrayPortalType,
            typename MinParentsArrayPortalType,
            typename MaxValuesArrayPortalType,
            typename MaxParentsArrayPortalType,
            typename SortOrderArrayPortalType,
            typename EdgesLinearArrayPortalType,
            typename FieldValuesArrayPortalType,
            typename OutputArrayPortalType>
  VTKM_EXEC void operator()(const vtkm::Id i,
                            const FirstArrayPortalType& first,
                            const NodesArrayPortalType& nodes,
                            const SupernodesArrayPortalType& supernodes,
                            const MinValuesArrayPortalType& minValues,
                            const MinParentsArrayPortalType& minParents,
                            const MaxValuesArrayPortalType& maxValues,
                            const MaxParentsArrayPortalType& maxParents,
                            const SortOrderArrayPortalType& ctSortOrder,
                            const EdgesLinearArrayPortalType& edgesLinear,
                            const FieldValuesArrayPortalType& fieldValues,
                            const OutputArrayPortalType& bestUp, // output
                            const OutputArrayPortalType& bestDown // output
  ) const
  {
    // edgesLinear is sorted by source vertex, so the edges incident to i form a
    // contiguous run starting at first.Get(i).
    Id k = first.Get(i);
    Float64 maxUpSubtreeHeight = 0;
    Float64 maxDownSubtreeHeight = 0;
    while (k < edgesLinear.GetNumberOfValues() && edgesLinear.Get(k)[0] == i)
    {
      // j is the neighbour reached by the current edge (i, j).
      Id j = edgesLinear.Get(k++)[1];
      Id regularVertexValueI = MaskedIndex(supernodes.Get(i));
      Id regularVertexValueJ = MaskedIndex(supernodes.Get(j));
      //
      // Get the minimum of subtree T(j) \cup {i}
      //
      // If the arc is pointed the right way (according to the rooting of the tree) use the subtree min value
      // This is the minimum of T(j)
      Id minValueInSubtree = MaskedIndex(supernodes.Get(minValues.Get(j)));
      // See if the vertex i has a smaller value,
      // This means find the minimum of T(j) \cup {i}
      if (minValueInSubtree > regularVertexValueI)
      {
        minValueInSubtree = MaskedIndex(supernodes.Get(i));
      }
      // If the direction of the arc is not according to the rooting of the tree,
      // then the minimum on that subtree must be the global minimum
      if (j == minParents.Get(i))
      {
        minValueInSubtree = 0;
      }
      //
      // Get the maximum of subtree T(j) \cup {i}
      //
      // See if the vertex i has a bigger value,
      // This means find the maximum of T(j) \cup {i}
      Id maxValueInSubtree = MaskedIndex(supernodes.Get(maxValues.Get(j)));
      // Include the current vertex along with the subtree it points at
      if (maxValueInSubtree < regularVertexValueI)
      {
        maxValueInSubtree = MaskedIndex(supernodes.Get(i));
      }
      // If the direction of the arc is not according to the rooting of the tree,
      // then the maximum on that subtree must be the global maximum
      if (j == maxParents.Get(i))
      {
        maxValueInSubtree = nodes.GetNumberOfValues() - 1;
      }
      // After having found the min and the max in T(j) \cup {i} we compute their height difference
      // (the subtree height in terms of the scalar field values).
      Float64 minValue = fieldValues.Get(ctSortOrder.Get(minValueInSubtree));
      Float64 maxValue = fieldValues.Get(ctSortOrder.Get(maxValueInSubtree));
      Float64 subtreeHeight = maxValue - minValue;
      // Downward Edge: the neighbour has a smaller regular value than i.
      if (regularVertexValueI > regularVertexValueJ)
      {
        if (subtreeHeight > maxDownSubtreeHeight)
        {
          maxDownSubtreeHeight = subtreeHeight;
          bestDown.Set(i, j);
        }
      }
      // UpwardsEdge
      else
      {
        if (subtreeHeight > maxUpSubtreeHeight)
        {
          maxUpSubtreeHeight = subtreeHeight;
          bestUp.Set(i, j);
        }
      }
    }
    // Make sure at least one of these was set
    assert(false == NoSuchElement(bestUp.Get(i)) || false == NoSuchElement(bestDown.Get(i)));
  }
}; // ComputeBestUpDown
} // process_contourtree_inc
} // namespace contourtree_augmented
} // namespace worklet
} // namespace vtkm
#endif

@ -1,115 +0,0 @@
//============================================================================
// Copyright (c) Kitware, Inc.
// All rights reserved.
// See LICENSE.txt for details.
//
// This software is distributed WITHOUT ANY WARRANTY; without even
// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the above copyright notice for more information.
//============================================================================
// Copyright (c) 2018, The Regents of the University of California, through
// Lawrence Berkeley National Laboratory (subject to receipt of any required approvals
// from the U.S. Dept. of Energy). All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// (1) Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// (2) Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// (3) Neither the name of the University of California, Lawrence Berkeley National
// Laboratory, U.S. Dept. of Energy nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
// IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
//
//=============================================================================
//
// This code is an extension of the algorithm presented in the paper:
// Parallel Peak Pruning for Scalable SMP Contour Tree Computation.
// Hamish Carr, Gunther Weber, Christopher Sewell, and James Ahrens.
// Proceedings of the IEEE Symposium on Large Data Analysis and Visualization
// (LDAV), October 2016, Baltimore, Maryland.
//
// The PPP2 algorithm and software were jointly developed by
// Hamish Carr (University of Leeds), Gunther H. Weber (LBNL), and
// Oliver Ruebel (LBNL)
//==============================================================================
#ifndef vtk_m_worklet_contourtree_augmented_process_contourtree_inc_euler_tour_compute_first_next_h
#define vtk_m_worklet_contourtree_augmented_process_contourtree_inc_euler_tour_compute_first_next_h
#include <vtkm/worklet/WorkletMapField.h>
#include <vtkm/worklet/contourtree_augmented/Types.h>
/*
* This code is written by Petar Hristov in 09.2019
*
* This worklet performs the first step in computing an Euler tour.
 * That is, computing two auxiliary arrays, from which we can compute the linked edge list of the Euler tour.
* See https://en.wikipedia.org/wiki/Euler_tour_technique for details
*
*/
namespace vtkm
{
namespace worklet
{
namespace contourtree_augmented
{
namespace process_contourtree_inc
{
// Worklet computing the 'first' and 'next' auxiliary arrays from the sorted
// directed-edge list; these define the linked adjacency lists from which the
// Euler tour is assembled.
class ComputeEulerTourFirstNext : public vtkm::worklet::WorkletMapField
{
public:
  typedef void ControlSignature(WholeArrayIn edges, WholeArrayOut first, WholeArrayOut next);
  typedef void ExecutionSignature(InputIndex, _1, _2, _3);
  using InputDomain = _1;

  VTKM_EXEC_CONT ComputeEulerTourFirstNext() {}

  template <typename EdgePortalType, typename IdPortalType>
  VTKM_EXEC void operator()(const vtkm::Id i,
                            const EdgePortalType& edges,
                            const IdPortalType& first,
                            const IdPortalType& next) const
  {
    if (i == 0)
    {
      // The very first sorted edge opens the adjacency list of vertex 0.
      first.Set(0, 0);
      return;
    }
    const auto source = edges.Get(i)[0];
    if (source == edges.Get(i - 1)[0])
    {
      // Same source vertex as the previous edge: chain it into that list.
      next.Set(i - 1, i);
    }
    else
    {
      // New source vertex: this edge starts its adjacency list.
      first.Set(source, i);
    }
  }
}; // ComputeEulerTourFirstNext
} // process_contourtree_inc
} // namespace contourtree_augmented
} // namespace worklet
} // namespace vtkm
#endif

@ -1,130 +0,0 @@
//============================================================================
// Copyright (c) Kitware, Inc.
// All rights reserved.
// See LICENSE.txt for details.
//
// This software is distributed WITHOUT ANY WARRANTY; without even
// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the above copyright notice for more information.
//============================================================================
// Copyright (c) 2018, The Regents of the University of California, through
// Lawrence Berkeley National Laboratory (subject to receipt of any required approvals
// from the U.S. Dept. of Energy). All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// (1) Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// (2) Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// (3) Neither the name of the University of California, Lawrence Berkeley National
// Laboratory, U.S. Dept. of Energy nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
// IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
//
//=============================================================================
//
// This code is an extension of the algorithm presented in the paper:
// Parallel Peak Pruning for Scalable SMP Contour Tree Computation.
// Hamish Carr, Gunther Weber, Christopher Sewell, and James Ahrens.
// Proceedings of the IEEE Symposium on Large Data Analysis and Visualization
// (LDAV), October 2016, Baltimore, Maryland.
//
// The PPP2 algorithm and software were jointly developed by
// Hamish Carr (University of Leeds), Gunther H. Weber (LBNL), and
// Oliver Ruebel (LBNL)
//==============================================================================
#ifndef vtk_m_worklet_contourtree_augmented_process_contourtree_inc_euler_tour_list_h
#define vtk_m_worklet_contourtree_augmented_process_contourtree_inc_euler_tour_list_h
#include <vtkm/cont/Algorithm.h>
#include <vtkm/worklet/WorkletMapField.h>
#include <vtkm/worklet/contourtree_augmented/Types.h>
/*
* This code is written by Petar Hristov in 09.2019
*
* This worklet performs the second step in computing an Euler tour.
 * That is, computing the successor (adjacency) list of the edges in the Euler tour.
* See https://en.wikipedia.org/wiki/Euler_tour_technique for details
*
*/
namespace vtkm
{
namespace worklet
{
namespace contourtree_augmented
{
namespace process_contourtree_inc
{
// Worklet computing the successor array of the Euler tour: for every directed
// edge it finds which edge the tour traverses next.
class ComputeEulerTourList : public vtkm::worklet::WorkletMapField
{
public:
  typedef void ControlSignature(WholeArrayIn next,
                                WholeArrayIn first,
                                WholeArrayIn edges,
                                WholeArrayOut succ);
  typedef void ExecutionSignature(InputIndex, _1, _2, _3, _4);
  using InputDomain = _1;

  template <typename NextPortalType,
            typename FirstPortalType,
            typename EdgePortalType,
            typename SuccPortalType>
  VTKM_EXEC void operator()(const vtkm::Id i,
                            const NextPortalType& next,
                            const FirstPortalType& first,
                            const EdgePortalType& edges,
                            const SuccPortalType& succ) const
  {
    // Locate the reverse edge (b, a) of the current edge (a, b) by scanning
    // b's adjacency list in the sorted edge array. The scan is short because
    // contour tree nodes inherit the bounded degree of 2D/3D mesh vertices.
    const auto edge = edges.Get(i);
    vtkm::Id reverseIndex = first.Get(edge[1]);
    while (reverseIndex < edges.GetNumberOfValues() &&
           edges.Get(reverseIndex)[0] == edge[1] &&
           edges.Get(reverseIndex)[1] != edge[0])
    {
      reverseIndex++;
    }
    // The tour continues with the edge after the reverse edge in b's adjacency
    // list, wrapping around to the start of that list when the reverse edge is
    // the last entry.
    const auto chained = next.Get(reverseIndex);
    succ.Set(i, (NO_SUCH_ELEMENT == chained) ? first.Get(edge[1]) : chained);
  }
}; // ComputeEulerTourList
} // process_contourtree_inc
} // namespace contourtree_augmented
} // namespace worklet
} // namespace vtkm
#endif

@ -1,134 +0,0 @@
//============================================================================
// Copyright (c) Kitware, Inc.
// All rights reserved.
// See LICENSE.txt for details.
//
// This software is distributed WITHOUT ANY WARRANTY; without even
// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the above copyright notice for more information.
//============================================================================
// Copyright (c) 2018, The Regents of the University of California, through
// Lawrence Berkeley National Laboratory (subject to receipt of any required approvals
// from the U.S. Dept. of Energy). All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// (1) Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// (2) Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// (3) Neither the name of the University of California, Lawrence Berkeley National
// Laboratory, U.S. Dept. of Energy nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
// IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
//
//=============================================================================
//
// This code is an extension of the algorithm presented in the paper:
// Parallel Peak Pruning for Scalable SMP Contour Tree Computation.
// Hamish Carr, Gunther Weber, Christopher Sewell, and James Ahrens.
// Proceedings of the IEEE Symposium on Large Data Analysis and Visualization
// (LDAV), October 2016, Baltimore, Maryland.
//
// The PPP2 algorithm and software were jointly developed by
// Hamish Carr (University of Leeds), Gunther H. Weber (LBNL), and
// Oliver Ruebel (LBNL)
//==============================================================================
#ifndef vtk_m_worklet_contourtree_augmented_process_contourtree_inc_compute_min_max_values_h
#define vtk_m_worklet_contourtree_augmented_process_contourtree_inc_compute_min_max_values_h
#include <vtkm/worklet/WorkletMapField.h>
#include <vtkm/worklet/contourtree_augmented/Types.h>
/*
*
* This code is written by Petar Hristov in 09.2019
*
* This worklet computes a prefix min/max over the subtree of a rooted contour tree
* using the euler tour data structure.
*
 * Given an Euler tour and an array which has the index of the first and last occurrence of every vertex,
* this worklet goes through that subarray of the euler tour to find the min/max value (in terms of regular node Id, or isovalue, does not matter which)
*
* This can be optimised with a vtkm parallel reduce operation
*
*/
namespace vtkm
{
namespace worklet
{
namespace contourtree_augmented
{
namespace process_contourtree_inc
{
// Worklet computing, for each vertex, the supernode with the extremal regular
// value in its subtree, by scanning that vertex's span of the Euler tour.
class ComputeMinMaxValues : public vtkm::worklet::WorkletMapField
{
public:
  typedef void ControlSignature(WholeArrayIn supernodes,
                                WholeArrayIn firstLast,
                                WholeArrayIn tourEdges,
                                WholeArrayOut output);
  typedef void ExecutionSignature(InputIndex, _1, _2, _3, _4);
  using InputDomain = _1;

  // When true the worklet looks for the subtree minimum, otherwise the maximum.
  bool isMin = true;

  VTKM_EXEC_CONT ComputeMinMaxValues(bool _isMin)
    : isMin(_isMin)
  {
  }

  template <typename SupernodePortalType,
            typename FirstLastPortalType,
            typename TourEdgePortalType,
            typename OutPortalType>
  VTKM_EXEC void operator()(const vtkm::Id i,
                            const SupernodePortalType& supernodes,
                            const FirstLastPortalType& firstLast,
                            const TourEdgePortalType& tourEdges,
                            const OutPortalType& output) const
  {
    const auto range = firstLast.Get(i);
    // Seed with the target vertex of the first tour edge in i's span.
    Id best = tourEdges.Get(range.first)[1];
    for (Id j = range.first; j < range.second; j++)
    {
      const Id candidate = tourEdges.Get(j)[1];
      const Id candidateValue = MaskedIndex(supernodes.Get(candidate));
      const Id bestValue = MaskedIndex(supernodes.Get(best));
      if (isMin ? (candidateValue < bestValue) : (candidateValue > bestValue))
      {
        best = candidate;
      }
    }
    output.Set(i, best);
  }
}; // ComputeMinMaxValues
} // process_contourtree_inc
} // namespace contourtree_augmented
} // namespace worklet
} // namespace vtkm
#endif

@ -1,180 +0,0 @@
//============================================================================
// Copyright (c) Kitware, Inc.
// All rights reserved.
// See LICENSE.txt for details.
//
// This software is distributed WITHOUT ANY WARRANTY; without even
// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the above copyright notice for more information.
//============================================================================
// Copyright (c) 2018, The Regents of the University of California, through
// Lawrence Berkeley National Laboratory (subject to receipt of any required approvals
// from the U.S. Dept. of Energy). All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// (1) Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// (2) Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// (3) Neither the name of the University of California, Lawrence Berkeley National
// Laboratory, U.S. Dept. of Energy nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
// IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
//
//=============================================================================
//
// This code is an extension of the algorithm presented in the paper:
// Parallel Peak Pruning for Scalable SMP Contour Tree Computation.
// Hamish Carr, Gunther Weber, Christopher Sewell, and James Ahrens.
// Proceedings of the IEEE Symposium on Large Data Analysis and Visualization
// (LDAV), October 2016, Baltimore, Maryland.
//
// The PPP2 algorithm and software were jointly developed by
// Hamish Carr (University of Leeds), Gunther H. Weber (LBNL), and
// Oliver Ruebel (LBNL)
//==============================================================================
#ifndef vtk_m_worklet_contourtree_augmented_process_contourtree_euler_tour_h
#define vtk_m_worklet_contourtree_augmented_process_contourtree_euler_tour_h
#include <vtkm/cont/ArrayCopy.h>
#include <vtkm/cont/Invoker.h>
#include <vtkm/worklet/contourtree_augmented/Types.h>
#include <vtkm/worklet/contourtree_augmented/processcontourtree/ComputeEulerTourFirstNext.h>
#include <vtkm/worklet/contourtree_augmented/processcontourtree/ComputeEulerTourList.h>
namespace vtkm
{
namespace worklet
{
namespace contourtree_augmented
{
namespace process_contourtree_inc
{
// Serial helper class that builds an Euler tour of the contour tree (each
// superarc contributes two directed edges) and can re-root and linearise the
// tour. See https://en.wikipedia.org/wiki/Euler_tour_technique for details.
class EulerTour
{
public:
  vtkm::cont::Invoker Invoke;
  // The sequence of the edges in the tour is given by the succ array
  cont::ArrayHandle<Id> succ;
  // The tour consists of the directed edges of the contour tree
  cont::ArrayHandle<Vec<Id, 2>> edges;
  // Index of the first occurrence of each vertex in the sorted edge list
  cont::ArrayHandle<Id> first;
  // Compute the Euler Tour with root 0
  void computeEulerTour(const IdArrayType::ReadPortalType superarcs)
  {
    //
    // Make a list of all directed edges in the tree
    //
    edges.Allocate(2 * (superarcs.GetNumberOfValues() - 1));
    int counter = 0;
    // NOTE(review): serial control-side loop; WritePortal() is re-fetched on
    // every Set call, which is costly — hoist the portal if this becomes hot.
    for (int i = 0; i < superarcs.GetNumberOfValues(); i++)
    {
      Id j = MaskedIndex(superarcs.Get(i));
      if (j != 0)
      {
        edges.WritePortal().Set(counter++, { i, j });
        edges.WritePortal().Set(counter++, { j, i });
      }
    }
    vtkm::cont::Algorithm::Sort(edges, vtkm::SortLess());
    //
    // Initialize first and next arrays. They are used to compute the circular linked list that is the euler tour
    //
    //cont::ArrayHandle<Id> first;
    cont::ArrayHandle<Id> next;
    vtkm::cont::ArrayCopy(
      vtkm::cont::make_ArrayHandle(std::vector<Id>(
        static_cast<unsigned long>(superarcs.GetNumberOfValues()), NO_SUCH_ELEMENT)),
      first);
    vtkm::cont::ArrayCopy(
      vtkm::cont::make_ArrayHandle(
        std::vector<Id>(static_cast<unsigned long>(edges.GetNumberOfValues()), NO_SUCH_ELEMENT)),
      next);
    //
    // Compute First and Next arrays that are needed to compute the euler tour linked list
    //
    vtkm::worklet::contourtree_augmented::process_contourtree_inc::ComputeEulerTourFirstNext
      eulerWorklet;
    this->Invoke(eulerWorklet, edges, first, next);
    //
    // Compute the euler tour as a circular linked list from the first and next arrays
    //
    succ.Allocate(edges.GetNumberOfValues());
    vtkm::worklet::contourtree_augmented::process_contourtree_inc::ComputeEulerTourList
      eulerTourListWorklet;
    this->Invoke(eulerTourListWorklet, next, first, edges, succ);
  }
  // Reroot the euler tour at a different root (O(n) for finding the first occurrence of the new root, O(1) for rerooting and O(n) for returning it as an array)
  void getTourAtRoot(const Id root, const cont::ArrayHandle<Vec<Id, 2>>::WritePortalType tourEdges)
  {
    //
    // Reroot at the global min/max: find the first tour edge leaving 'root'
    //
    // NOTE(review): if no edge with source == root exists, 'start' remains
    // NO_SUCH_ELEMENT and the copy loop below indexes out of bounds — confirm
    // callers always pass a vertex that occurs in the tour.
    Id i = 0;
    Id start = NO_SUCH_ELEMENT;
    do
    {
      if (edges.ReadPortal().Get(i)[0] == root)
      {
        start = i;
        break;
      }
      i = succ.WritePortal().Get(i);
    } while (i != 0);
    //
    // Convert linked list to array handle array (walk the circular list once)
    //
    {
      int counter = 0;
      i = start;
      do
      {
        tourEdges.Set(counter++, edges.ReadPortal().Get(i));
        i = succ.WritePortal().Get(i);
      } while (i != start);
    }
  }
}; // class EulerTour
} // namespace process_contourtree_inc
} // namespace contourtree_augmented
} // worklet
} // vtkm
#endif

@ -69,6 +69,213 @@ namespace contourtree_augmented
namespace process_contourtree_inc
{
// Worklet that expands the per-supernode hypersweep sums into a pair of
// directed EdgeDataVolume records (edge and its opposite) per superarc,
// ready for the volume-based branch decomposition sort.
class InitialiseArcsVolume : public vtkm::worklet::WorkletMapField
{
public:
  typedef void ControlSignature(WholeArrayIn, WholeArrayIn, WholeArrayIn, WholeArrayInOut);
  typedef void ExecutionSignature(InputIndex, _1, _2, _3, _4);
  using InputDomain = _3;
  // Total number of regular vertices; used to complement the subtree sums.
  vtkm::Id totalVolume;
  VTKM_EXEC_CONT InitialiseArcsVolume(vtkm::Id _totalVolume)
    : totalVolume(_totalVolume)
  {
  }
  template <typename IdWholeArrayInPortalType, typename EdgeWholeArrayInOutPortal>
  VTKM_EXEC void operator()(const vtkm::Id currentId,
                            const IdWholeArrayInPortalType& hypersweepSumValuesPortal,
                            const IdWholeArrayInPortalType& superarcIntrinsicWeightPortal,
                            const IdWholeArrayInPortalType& superarcsPortal,
                            const EdgeWholeArrayInOutPortal& arcsPortal) const
  {
    Id i = currentId;
    Id parent = MaskedIndex(superarcsPortal.Get(i));
    if (parent == 0)
    {
      // We expect the root to be the last vertex in the supernodes array
      // NOTE(review): this assert only excludes index N-2; given the comment it
      // probably should check i == superarcsPortal.GetNumberOfValues() - 1 — confirm.
      assert(i != superarcsPortal.GetNumberOfValues() - 2);
      return;
    }
    EdgeDataVolume edge;
    edge.i = i;
    edge.j = parent;
    // Orientation is taken from the ascending flag of the (unmasked) superarc.
    edge.upEdge = IsAscending((superarcsPortal.Get(i)));
    // Volume on the far side of the superarc: everything not in i's subtree,
    // plus the superarc's own intrinsic weight (the -1 presumably discounts the
    // shared supernode — confirm against the hypersweep definition).
    edge.subtreeVolume =
      (totalVolume - hypersweepSumValuesPortal.Get(i)) + (superarcIntrinsicWeightPortal.Get(i) - 1);
    EdgeDataVolume oppositeEdge;
    oppositeEdge.i = parent;
    oppositeEdge.j = i;
    oppositeEdge.upEdge = !edge.upEdge;
    // The opposite direction sees exactly the hypersweep subtree sum of i.
    oppositeEdge.subtreeVolume = hypersweepSumValuesPortal.Get(i);
    // Two directed edges per superarc, stored contiguously at 2i and 2i+1.
    arcsPortal.Set(i * 2, edge);
    arcsPortal.Set(i * 2 + 1, oppositeEdge);
  }
};
// Worklet that records, for every superparent, the position of its first
// vertex in the sort order.
class SetFirstVertexForSuperparent : public vtkm::worklet::WorkletMapField
{
public:
  typedef void ControlSignature(WholeArrayIn, WholeArrayIn, WholeArrayInOut);
  typedef void ExecutionSignature(InputIndex, _1, _2, _3);
  using InputDomain = _1;

  VTKM_EXEC_CONT SetFirstVertexForSuperparent() {}

  template <typename InPortalType, typename InOutPortalType>
  VTKM_EXEC void operator()(
    const vtkm::Id sortedNode,
    const InPortalType& nodesPortal,
    const InPortalType& superparentsPortal,
    const InOutPortalType& firstVertexForSuperparentPortal) const
  {
    const vtkm::Id sortID = nodesPortal.Get(sortedNode);
    const vtkm::Id superparent = superparentsPortal.Get(sortID);
    // A sorted node opens a new superparent run when it is the first node
    // overall or its superparent differs from the previous node's. Short-circuit
    // evaluation guards the Get(sortedNode - 1) access when sortedNode == 0.
    if (sortedNode == 0 ||
        superparent != superparentsPortal.Get(nodesPortal.Get(sortedNode - 1)))
    {
      firstVertexForSuperparentPortal.Set(superparent, sortedNode);
    }
  }
};
// Worklet that computes the intrinsic weight of each superarc: the number of
// regular vertices whose superparent is that superarc.
class ComputeIntrinsicWeight : public vtkm::worklet::WorkletMapField
{
public:
  typedef void ControlSignature(WholeArrayIn, WholeArrayIn, WholeArrayIn, WholeArrayInOut);
  typedef void ExecutionSignature(InputIndex, _1, _2, _3, _4);
  using InputDomain = _2;

  VTKM_EXEC_CONT ComputeIntrinsicWeight() {}

  template <typename InPortalType, typename InOutPortalType>
  VTKM_EXEC void operator()(const vtkm::Id superarc,
                            const InPortalType& arcsPortal,
                            const InPortalType& superarcsPortal,
                            const InPortalType& firstVertexForSuperparentPortal,
                            const InOutPortalType& superarcIntrinsicWeightPortal) const
  {
    // Each superarc owns a contiguous run of sorted vertices beginning at its
    // first vertex and ending just before the next superarc's first vertex.
    const vtkm::Id rangeStart = firstVertexForSuperparentPortal.Get(superarc);
    // The final superarc's run is terminated by the total number of arcs instead.
    const vtkm::Id rangeEnd = (superarc == superarcsPortal.GetNumberOfValues() - 1)
      ? arcsPortal.GetNumberOfValues()
      : firstVertexForSuperparentPortal.Get(superarc + 1);
    superarcIntrinsicWeightPortal.Set(superarc, rangeEnd - rangeStart);
  }
};
// Worklet that records the first supernode transferred in each hypersweep
// iteration. Assumes supernodes are ordered by the (masked) iteration number
// stored in whenTransferred.
class SetFirstSupernodePerIteration : public vtkm::worklet::WorkletMapField
{
public:
  typedef void ControlSignature(WholeArrayIn, WholeArrayInOut);
  typedef void ExecutionSignature(InputIndex, _1, _2);
  using InputDomain = _1;

  VTKM_EXEC_CONT SetFirstSupernodePerIteration() {}

  template <typename IdWholeArrayInPortalType, typename IdWholeArrayInOutPortalType>
  VTKM_EXEC void operator()(
    const vtkm::Id supernode,
    const IdWholeArrayInPortalType& whenTransferredPortal,
    const IdWholeArrayInOutPortalType& firstSupernodePerIterationPortal) const
  {
    // An iteration starts at supernode 0 and at every index where the masked
    // iteration number differs from the previous supernode's.
    const vtkm::Id iteration = MaskedIndex(whenTransferredPortal.Get(supernode));
    const bool opensIteration = (supernode == 0) ||
      (iteration != MaskedIndex(whenTransferredPortal.Get(supernode - 1)));
    if (opensIteration)
    {
      firstSupernodePerIterationPortal.Set(iteration, supernode);
    }
  }
};
// Worklet that transfers the value accumulated at the end of each hyperarc
// (by the preceding prefix scan) onto the hyperarc's target supernode.
// The transfer uses an atomic compare-and-swap loop because several
// hyperarcs processed in the same iteration may share a target supernode.
template <typename Operator>
class AddDependentWeightHypersweep : public vtkm::worklet::WorkletMapField
{
public:
  typedef void ControlSignature(WholeArrayIn iterationHypernodes,
                                WholeArrayIn hypernodes,
                                WholeArrayIn hyperarcs,
                                WholeArrayIn howManyUsed,
                                AtomicArrayInOut minMaxIndex);
  typedef void ExecutionSignature(InputIndex, _1, _2, _3, _4, _5);
  using InputDomain = _1;

  Operator op; // binary reduction operator (e.g. Minimum / Maximum / Sum)

  // Default Constructor
  VTKM_EXEC_CONT AddDependentWeightHypersweep(Operator _op)
    : op(_op)
  {
  }

  template <typename IdWholeArrayHandleCountingIn,
            typename IdWholeArrayIn,
            typename IdWholeArrayInOut>
  VTKM_EXEC void operator()(const vtkm::Id hyperarcId,
                            const IdWholeArrayHandleCountingIn& iterationHypernodesPortal,
                            const IdWholeArrayIn& hypernodesPortal,
                            const IdWholeArrayIn& hyperarcsPortal,
                            const IdWholeArrayIn& howManyUsedPortal,
                            const IdWholeArrayInOut& minMaxIndexPortal) const
  {
    Id i = iterationHypernodesPortal.Get(hyperarcId);
    // If it's the last hyperarc there's nothing to do (it's just the root)
    if (i >= hypernodesPortal.GetNumberOfValues() - 1)
    {
      return;
    }
    //
    // The value of the prefix scan is now accumulated in the last supernode
    // of the hyperarc. Transfer it to the hyperarc's target supernode.
    //
    vtkm::Id lastSupernode = MaskedIndex(hypernodesPortal.Get(i + 1)) - howManyUsedPortal.Get(i);
    Id vertex = lastSupernode - 1;
    Id parent = MaskedIndex(hyperarcsPortal.Get(i));
    Id vertexValue = minMaxIndexPortal.Get(vertex);
    // Atomically fold the accumulated value into the target supernode.
    // NOTE: use the portal's own value type (not a 32-bit temporary) so that
    // wide vtkm::Id values are not truncated before the compare-and-swap.
    auto current = minMaxIndexPortal.Get(parent);
    decltype(current) expected;
    do
    {
      expected = current; // ensure the slot hasn't changed since we read it
      const auto desired = op(current, vertexValue); // combine with the operator
      current = minMaxIndexPortal.CompareAndSwap(parent, desired, expected);
    } while (current != expected);
  }
}; // AddDependentWeightHypersweep
class InitialiseArcs : public vtkm::worklet::WorkletMapField
{
@ -142,19 +349,16 @@ public:
edge.subtreeMax = globalMaxSortedIndex;
}
// Compensate for the missing edge where the root is
// We technically don't need this
if (i > rootSupernodeId)
{
i--;
}
// We cannot use i here because one of the vertices is skipped (the root one and we don't know where it is)
arcsPortal.Set(i * 2, edge);
arcsPortal.Set(i * 2 + 1, oppositeEdge);
}
}; // ComputeMinMaxValues
};
class ComputeSubtreeHeight : public vtkm::worklet::WorkletMapField
@ -467,8 +671,6 @@ public:
}; // ComputeMinMaxValues
template <typename Operator>
class IncorporateParent : public vtkm::worklet::WorkletMapField
{
@ -504,8 +706,6 @@ public:
}; // ComputeMinMaxValues
} // process_contourtree_inc
} // namespace contourtree_augmented
} // namespace worklet

@ -1,147 +0,0 @@
//============================================================================
// Copyright (c) Kitware, Inc.
// All rights reserved.
// See LICENSE.txt for details.
//
// This software is distributed WITHOUT ANY WARRANTY; without even
// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the above copyright notice for more information.
//============================================================================
// Copyright (c) 2018, The Regents of the University of California, through
// Lawrence Berkeley National Laboratory (subject to receipt of any required approvals
// from the U.S. Dept. of Energy). All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// (1) Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// (2) Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// (3) Neither the name of the University of California, Lawrence Berkeley National
// Laboratory, U.S. Dept. of Energy nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
// IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
//
//=============================================================================
//
// This code is an extension of the algorithm presented in the paper:
// Parallel Peak Pruning for Scalable SMP Contour Tree Computation.
// Hamish Carr, Gunther Weber, Christopher Sewell, and James Ahrens.
// Proceedings of the IEEE Symposium on Large Data Analysis and Visualization
// (LDAV), October 2016, Baltimore, Maryland.
//
// The PPP2 algorithm and software were jointly developed by
// Hamish Carr (University of Leeds), Gunther H. Weber (LBNL), and
// Oliver Ruebel (LBNL)
//==============================================================================
#ifndef vtk_m_worklet_contourtree_augmented_process_contourtree_inc_prefix_scan_hyperarcs_h
#define vtk_m_worklet_contourtree_augmented_process_contourtree_inc_prefix_scan_hyperarcs_h
#include <vtkm/BinaryOperators.h>
#include <vtkm/worklet/WorkletMapField.h>
#include <vtkm/worklet/contourtree_augmented/Types.h>
/*
 *
 * This code is written by Petar Hristov in 03.2020
 *
 * It performs an inclusive prefix scan along the supernode chain of each
 * hyperarc of the contour tree, using a caller-supplied binary operator.
 *
 */
namespace vtkm
{
namespace worklet
{
namespace contourtree_augmented
{
namespace process_contourtree_inc
{
// Worklet that performs a serial inclusive prefix scan (with a caller-supplied
// binary operator) along the chain of supernodes belonging to each hyperarc
// processed in the current iteration.
template <typename Operator>
class PrefixScanHyperarcs : public vtkm::worklet::WorkletMapField
{
public:
  typedef void ControlSignature(WholeArrayIn iterationHypernodes,
                                WholeArrayIn hypernodes,
                                WholeArrayIn howManyUsed,
                                WholeArrayInOut minMaxIndex);
  typedef void ExecutionSignature(InputIndex, _1, _2, _3, _4);
  using InputDomain = _1;

  Operator op; // binary reduction operator applied along the hyperarc chain

  // Default Constructor
  VTKM_EXEC_CONT PrefixScanHyperarcs(Operator _op)
    : op(_op)
  {
  }

  template <typename IdWholeArrayHandleCountingIn,
            typename IdWholeArrayIn,
            typename IdWholeArrayInOut>
  VTKM_EXEC void operator()(const vtkm::Id hyperarcId,
                            const IdWholeArrayHandleCountingIn& iterationHypernodes,
                            const IdWholeArrayIn& hypernodesPortal,
                            const IdWholeArrayIn& howManyUsedPortal,
                            const IdWholeArrayInOut& minMaxIndexPortal) const
  {
    // Keep the index in vtkm::Id — an int can narrow the value on large trees.
    vtkm::Id i = iterationHypernodes.Get(hyperarcId);
    // If it's the last hyperarc there's nothing to do (it's just the root)
    if (i >= hypernodesPortal.GetNumberOfValues() - 1)
    {
      return;
    }
    vtkm::Id firstSupernode = MaskedIndex(hypernodesPortal.Get(i));
    vtkm::Id lastSupernode = MaskedIndex(hypernodesPortal.Get(i + 1)) - howManyUsedPortal.Get(i);
    //
    // Serial inclusive prefix scan along the hyperarc chain.
    //
    for (Id j = firstSupernode + 1; j < lastSupernode; j++)
    {
      Id vertexValue = minMaxIndexPortal.Get(j - 1);
      Id parentValue = minMaxIndexPortal.Get(j);
      minMaxIndexPortal.Set(j, op(vertexValue, parentValue));
    }
  }
}; // PrefixScanHyperarcs
} // process_contourtree_inc
} // namespace contourtree_augmented
} // namespace worklet
} // namespace vtkm
#endif

@ -1,133 +0,0 @@
//============================================================================
// Copyright (c) Kitware, Inc.
// All rights reserved.
// See LICENSE.txt for details.
//
// This software is distributed WITHOUT ANY WARRANTY; without even
// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the above copyright notice for more information.
//============================================================================
// Copyright (c) 2018, The Regents of the University of California, through
// Lawrence Berkeley National Laboratory (subject to receipt of any required approvals
// from the U.S. Dept. of Energy). All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// (1) Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// (2) Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// (3) Neither the name of the University of California, Lawrence Berkeley National
// Laboratory, U.S. Dept. of Energy nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
// IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
//
//=============================================================================
//
// This code is an extension of the algorithm presented in the paper:
// Parallel Peak Pruning for Scalable SMP Contour Tree Computation.
// Hamish Carr, Gunther Weber, Christopher Sewell, and James Ahrens.
// Proceedings of the IEEE Symposium on Large Data Analysis and Visualization
// (LDAV), October 2016, Baltimore, Maryland.
//
// The PPP2 algorithm and software were jointly developed by
// Hamish Carr (University of Leeds), Gunther H. Weber (LBNL), and
// Oliver Ruebel (LBNL)
//==============================================================================
#ifndef vtk_m_worklet_contourtree_augmented_process_contourtree_sweep_hyperarcs_h
#define vtk_m_worklet_contourtree_augmented_process_contourtree_sweep_hyperarcs_h
#include <vtkm/worklet/WorkletMapField.h>
#include <vtkm/worklet/contourtree_augmented/Types.h>
/*
 *
 * This code is written by Petar Hristov in 03.2020
 *
 * It sweeps a binary functor along the supernode chain of each hyperarc
 * (a serial inclusive prefix scan per hyperarc).
 *
 */
namespace vtkm
{
namespace worklet
{
namespace contourtree_augmented
{
namespace process_contourtree_inc
{
// Worklet that sweeps a caller-supplied binary functor along the supernode
// chain of every hyperarc, as a serial inclusive prefix scan per hyperarc.
template <class BinaryFunctor>
class SweepHyperarcs : public vtkm::worklet::WorkletMapField
{
public:
  typedef void ControlSignature(WholeArrayIn hypernodesPortal,
                                WholeArrayIn howManyUsedPortal,
                                WholeArrayInOut minMaxIndexPortal);
  typedef void ExecutionSignature(InputIndex, _1, _2, _3);
  using InputDomain = _1;

  BinaryFunctor operation; // binary functor combined along each chain

  VTKM_EXEC_CONT SweepHyperarcs(BinaryFunctor _operation)
    : operation(_operation)
  {
  }

  template <typename HypernodesPortalType,
            typename HowManyUsedPortalType,
            typename MinMaxIndexPortalType>
  VTKM_EXEC void operator()(const vtkm::Id hypernode,
                            const HypernodesPortalType& hypernodesPortal,
                            const HowManyUsedPortalType& howManyUsedPortal,
                            const MinMaxIndexPortalType& minMaxIndexPortal) const
  {
    // Supernode range [chainBegin, chainEnd) covered by this hyperarc; the
    // tail supernodes consumed in earlier iterations are excluded.
    const vtkm::Id chainBegin = MaskedIndex(hypernodesPortal.Get(hypernode));
    const vtkm::Id chainEnd =
      MaskedIndex(hypernodesPortal.Get(hypernode + 1)) - howManyUsedPortal.Get(hypernode);

    // Serial inclusive prefix scan along the supernode chain.
    for (Id current = chainBegin + 1; current < chainEnd; current++)
    {
      const Id previousValue = minMaxIndexPortal.Get(current - 1);
      const Id currentValue = minMaxIndexPortal.Get(current);
      minMaxIndexPortal.Set(current, operation(previousValue, currentValue));
    }
  }
}; // SweepHyperarcs
} // process_contourtree_inc
} // namespace contourtree_augmented
} // namespace worklet
} // namespace vtkm
#endif