Compare commits

...

7 Commits

Author SHA1 Message Date
Gunther Weber
0beaf5adf3 Merge branch 'remove-contour-tree-test' into 'master'
Draft: Removed duplicate tests for contour tree

See merge request vtk/vtk-m!3232
2024-06-29 02:29:12 -04:00
Kenneth Moreland
fc570a75a5 Merge branch 'release' into master 2024-06-28 11:10:45 -04:00
Kenneth Moreland
cb07d8400c Merge branch 'release-2.0' into release 2024-06-28 11:10:45 -04:00
Kenneth Moreland
f610044d79 Merge topic 'split-contour-bench-2-1' into release-2.0
6f5f65487 Split the contour benchmark into structured/unstructured

Acked-by: Kitware Robot <kwrobot@kitware.com>
Acked-by: Vicente Bolea <vicente.bolea@kitware.com>
Merge-request: !3239
2024-06-28 11:10:45 -04:00
Kenneth Moreland
6f5f654878 Split the contour benchmark into structured/unstructured
We've been having problems with PerformanceTestBenchContour. In the last
few iterations, the runtime goes way up. We cannot find any reason for
this in the source code. There don't appear to be any particular
problems with memory or tables. The best we can figure is an issue with
the device hardware in the container.

The easy solution should be to break the benchmark into smaller pieces
to avoid the problem.
2024-06-26 17:47:27 -04:00
Gunther H. Weber
1ad0acd3a6 Removed no longer required test LFS files. 2024-06-07 14:50:32 -07:00
Gunther H. Weber
57d1c4d9df Removed duplicate tests for contour tree 2024-06-06 14:46:58 -07:00
10 changed files with 0 additions and 538 deletions

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:aa4623c6a5f1051038cfdafb9167a1e2625091063e5ee8c1247af45539189eea
size 2657

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ef15b2a0e8ae45c80c1e55cc8aadb0bbd725d39cf2fb091c2b3dde9d601e3930
size 1445

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4ec4d071805a7300bf61aa2c017fe5c9892ad10df7495ef40c69799d58c84db8
size 1522

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c5c3afc7bdb0fa75bad78fb69ce0557c752b98cab6f000d757ef2706a4498576
size 2116

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e31bdb91419a32e98dc1061ae6ce348f8fbc6c79b26a4e0bcd352d2b2e62b6a8
size 2228

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ccbe5767ce622b4c0e5e93dd6f6243d5c76a5a6f07c1e5cf43fb2b5e2e069255
size 2076

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6e9f0913c17a3a12338d043c455c6e9cde51c72fa70d619d935fa4369effed45
size 2316

@ -10,14 +10,12 @@
# Headers installed/used by the contour tree testing targets.
set(headers
  GenerateTestDataSets.h
  TestingContourTreeUniformDistributedLoadArrays.h
  )
set(unit_tests
UnitTestAverageByKey.cxx
UnitTestBoundingIntervalHierarchy.cxx
UnitTestCellDeepCopy.cxx
UnitTestContourTreeUniformDistributed.cxx
UnitTestCosmoTools.cxx
UnitTestDescriptiveStatistics.cxx
UnitTestDispatcherBase.cxx

@ -1,120 +0,0 @@
//============================================================================
// Copyright (c) Kitware, Inc.
// All rights reserved.
// See LICENSE.txt for details.
//
// This software is distributed WITHOUT ANY WARRANTY; without even
// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the above copyright notice for more information.
//============================================================================
// Copyright (c) 2018, The Regents of the University of California, through
// Lawrence Berkeley National Laboratory (subject to receipt of any required approvals
// from the U.S. Dept. of Energy). All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// (1) Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// (2) Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// (3) Neither the name of the University of California, Lawrence Berkeley National
// Laboratory, U.S. Dept. of Energy nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
// IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
//
#ifndef vtk_m_worklet_testing_contourtree_distributed_load_arrays_h
#define vtk_m_worklet_testing_contourtree_distributed_load_arrays_h
#include <vtkm/Types.h>
#include <vtkm/cont/ArrayHandle.h>

#include <cstddef>
#include <fstream>
#include <vector>
namespace vtkm
{
namespace worklet
{
namespace testing
{
namespace contourtree_distributed
{
// Types used in binary test files.
// The files store sizes as size_t, indices as 64-bit unsigned integers, and
// field data as double, independent of the vtkm::Id/FloatDefault width of the
// build that reads them.
using FileSizeType = std::size_t;
using FileIndexType = unsigned long long;
// Low 59 bits are the index payload; the high bits are flag bits that must be
// relocated when vtkm::Id is narrower than FileIndexType.
// (ULL suffix: the constant initializes an unsigned type.)
constexpr FileIndexType FileIndexMask = 0x07FFFFFFFFFFFFFFULL;
using FileDataType = double;
inline void ReadIndexArray(std::ifstream& is, vtkm::cont::ArrayHandle<vtkm::Id>& indexArray)
{
FileSizeType sz;
is.read(reinterpret_cast<char*>(&sz), sizeof(sz));
//std::cout << "Reading index array of size " << sz << std::endl;
indexArray.Allocate(sz);
auto writePortal = indexArray.WritePortal();
for (vtkm::Id i = 0; i < static_cast<vtkm::Id>(sz); ++i)
{
FileIndexType x;
is.read(reinterpret_cast<char*>(&x), sizeof(x));
// Covert from index type size in file (64 bit) to index type currently used by
// shifting the flag portion of the index accordingly
vtkm::Id shiftedFlagVal = (x & FileIndexMask) |
((x & ~FileIndexMask) >> ((sizeof(FileIndexType) - sizeof(vtkm::Id)) << 3));
writePortal.Set(i, shiftedFlagVal);
}
}
inline void ReadIndexArrayVector(std::ifstream& is,
std::vector<vtkm::cont::ArrayHandle<vtkm::Id>>& indexArrayVector)
{
FileSizeType sz;
is.read(reinterpret_cast<char*>(&sz), sizeof(sz));
//std::cout << "Reading vector of " << sz << " index arrays" << std::endl;
indexArrayVector.resize(sz);
for (vtkm::Id i = 0; i < static_cast<vtkm::Id>(sz); ++i)
{
ReadIndexArray(is, indexArrayVector[i]);
}
}
template <class FieldType>
inline void ReadDataArray(std::ifstream& is, vtkm::cont::ArrayHandle<FieldType>& dataArray)
{
FileSizeType sz;
is.read(reinterpret_cast<char*>(&sz), sizeof(sz));
//std::cout << "Reading data array of size " << sz << std::endl;
dataArray.Allocate(sz);
auto writePortal = dataArray.WritePortal();
for (vtkm::Id i = 0; i < static_cast<vtkm::Id>(sz); ++i)
{
FileDataType x;
is.read(reinterpret_cast<char*>(&x), sizeof(x));
//std::cout << "Read " << x << std::endl;
writePortal.Set(
i,
FieldType(x)); // Test data is stored as double but generally is also ok to be cast to float.
}
}
} // namespace contourtree_distributed
} // namespace testing
} // namespace worklet
} // namespace vtkm
#endif

@ -1,395 +0,0 @@
//============================================================================
// Copyright (c) Kitware, Inc.
// All rights reserved.
// See LICENSE.txt for details.
//
// This software is distributed WITHOUT ANY WARRANTY; without even
// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the above copyright notice for more information.
//============================================================================
// Copyright (c) 2018, The Regents of the University of California, through
// Lawrence Berkeley National Laboratory (subject to receipt of any required approvals
// from the U.S. Dept. of Energy). All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// (1) Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// (2) Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// (3) Neither the name of the University of California, Lawrence Berkeley National
// Laboratory, U.S. Dept. of Energy nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
// IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
//
//=============================================================================
//
// This code is an extension of the algorithm presented in the paper:
// Parallel Peak Pruning for Scalable SMP Contour Tree Computation.
// Hamish Carr, Gunther Weber, Christopher Sewell, and James Ahrens.
// Proceedings of the IEEE Symposium on Large Data Analysis and Visualization
// (LDAV), October 2016, Baltimore, Maryland.
//
// The PPP2 algorithm and software were jointly developed by
// Hamish Carr (University of Leeds), Gunther H. Weber (LBNL), and
// Oliver Ruebel (LBNL)
//==============================================================================
// #define DEBUG_PRINT
// #define PRINT_RESULT
#include <vtkm/cont/testing/MakeTestDataSet.h>
#include <vtkm/cont/testing/Testing.h>
#include <vtkm/filter/scalar_topology/worklet/contourtree_augmented/DataSetMesh.h>
#include <vtkm/filter/scalar_topology/worklet/contourtree_augmented/PrintVectors.h>
#include <vtkm/filter/scalar_topology/worklet/contourtree_augmented/meshtypes/ContourTreeMesh.h>
#include <vtkm/filter/scalar_topology/worklet/contourtree_distributed/CombineHyperSweepBlockFunctor.h>
#include <vtkm/filter/scalar_topology/worklet/contourtree_distributed/HierarchicalContourTree.h>
#include <vtkm/filter/scalar_topology/worklet/contourtree_distributed/HierarchicalHyperSweeper.h>
#include <vtkm/filter/scalar_topology/worklet/contourtree_distributed/HyperSweepBlock.h>
#include <vtkm/worklet/testing/TestingContourTreeUniformDistributedLoadArrays.h>
// clang-format off
VTKM_THIRDPARTY_PRE_INCLUDE
#include <vtkm/thirdparty/diy/diy.h>
VTKM_THIRDPARTY_POST_INCLUDE
// clang-format on
namespace
{
// Populate a HierarchicalContourTree from the binary test file at `filename`.
// The reads below mirror the exact order in which the arrays were written to
// the file; do NOT reorder them.
template <typename FieldType>
void LoadHierarchicalContourTree(
  const char* filename,
  vtkm::worklet::contourtree_distributed::HierarchicalContourTree<FieldType>& ht)
{
  using vtkm::worklet::testing::contourtree_distributed::ReadIndexArray;
  using vtkm::worklet::testing::contourtree_distributed::ReadIndexArrayVector;
  //template <typename FieldType>
  using vtkm::worklet::testing::contourtree_distributed::ReadDataArray; //<FieldType>;
  std::ifstream is(filename, std::ios_base::binary);
  // Per-regular-node arrays
  ReadIndexArray(is, ht.RegularNodeGlobalIds);
  ReadDataArray<FieldType>(is, ht.DataValues);
  ReadIndexArray(is, ht.RegularNodeSortOrder);
  ReadIndexArray(is, ht.Regular2Supernode);
  ReadIndexArray(is, ht.Superparents);
  // Per-supernode arrays
  ReadIndexArray(is, ht.Supernodes);
  ReadIndexArray(is, ht.Superarcs);
  ReadIndexArray(is, ht.Hyperparents);
  ReadIndexArray(is, ht.Super2Hypernode);
  ReadIndexArray(is, ht.WhichRound);
  ReadIndexArray(is, ht.WhichIteration);
  // Per-hypernode arrays
  ReadIndexArray(is, ht.Hypernodes);
  ReadIndexArray(is, ht.Hyperarcs);
  ReadIndexArray(is, ht.Superchildren);
  // The round count is stored as a raw int in the file.
  int nRounds;
  is.read(reinterpret_cast<char*>(&nRounds), sizeof(nRounds));
  //std::cout << "nRounds = " << nRounds << std::endl;
  ht.NumRounds = nRounds;
  //ht.NumOwnedRegularVertices = 0;
  // Per-round counts and per-iteration offsets
  ReadIndexArray(is, ht.NumRegularNodesInRound);
  ReadIndexArray(is, ht.NumSupernodesInRound);
  ReadIndexArray(is, ht.NumHypernodesInRound);
  ReadIndexArray(is, ht.NumIterations);
  ReadIndexArrayVector(is, ht.FirstSupernodePerIteration);
  ReadIndexArrayVector(is, ht.FirstHypernodePerIteration);
}
// Merge two contour tree meshes loaded from mesh1_filename/mesh2_filename and
// compare the merged result against the mesh stored in combined_filename.
// MergeWith writes its result into the second mesh, so contourTreeMesh2 holds
// the combined mesh after the call.
template <typename FieldType>
void TestContourTreeMeshCombine(const std::string& mesh1_filename,
                                const std::string& mesh2_filename,
                                const std::string& combined_filename)
{
  std::cout << "Testing combining meshes " << mesh1_filename << " " << mesh2_filename
            << " with expected result " << combined_filename << std::endl;
  vtkm::worklet::contourtree_augmented::ContourTreeMesh<FieldType> contourTreeMesh1;
  contourTreeMesh1.Load(mesh1_filename.c_str());
  vtkm::worklet::contourtree_augmented::ContourTreeMesh<FieldType> contourTreeMesh2;
  contourTreeMesh2.Load(mesh2_filename.c_str());
  contourTreeMesh2.MergeWith(contourTreeMesh1);
  // Result is written to contourTreeMesh2
  vtkm::worklet::contourtree_augmented::ContourTreeMesh<FieldType> combinedContourTreeMesh;
  combinedContourTreeMesh.Load(combined_filename.c_str());
  // Compare every component of the merged mesh against the expected mesh.
  VTKM_TEST_ASSERT(
    test_equal_ArrayHandles(contourTreeMesh2.SortedValues, combinedContourTreeMesh.SortedValues));
  // (The original code asserted GlobalMeshIndex twice; the redundant duplicate
  // assertion has been removed.)
  VTKM_TEST_ASSERT(test_equal_ArrayHandles(contourTreeMesh2.GlobalMeshIndex,
                                           combinedContourTreeMesh.GlobalMeshIndex));
  VTKM_TEST_ASSERT(test_equal_ArrayHandles(contourTreeMesh2.NeighborConnectivity,
                                           combinedContourTreeMesh.NeighborConnectivity));
  VTKM_TEST_ASSERT(test_equal_ArrayHandles(contourTreeMesh2.NeighborOffsets,
                                           combinedContourTreeMesh.NeighborOffsets));
  VTKM_TEST_ASSERT(contourTreeMesh2.NumVertices == combinedContourTreeMesh.NumVertices);
  VTKM_TEST_ASSERT(contourTreeMesh2.MaxNeighbors == combinedContourTreeMesh.MaxNeighbors);
}
// Exercise the distributed hyper sweep: load four pre-computed hierarchical
// contour tree blocks covering a 9x8x1 global grid (2x2 block decomposition),
// run a local hyper sweep per block, reduce across blocks with DIY, and
// compare the resulting intrinsic and dependent volumes against hard-coded
// expected values.
void TestHierarchicalHyperSweeper()
{
  std::cout << "Testing HierarchicalHyperSweeper" << std::endl;
  using vtkm::cont::testing::Testing;
  using ContourTreeDataFieldType = vtkm::FloatDefault;
  // Test input: pre-saved hierarchical augmented trees, one file per block.
  const int numBlocks = 4;
  const char* filenames[numBlocks] = { "misc/8x9test_HierarchicalAugmentedTree_Block0.dat",
                                       "misc/8x9test_HierarchicalAugmentedTree_Block1.dat",
                                       "misc/8x9test_HierarchicalAugmentedTree_Block2.dat",
                                       "misc/8x9test_HierarchicalAugmentedTree_Block3.dat" };
  vtkm::Id3 globalSize{ 9, 8, 1 };
  // Per-block extent, origin, and position in the 2x2(x1) block grid.
  vtkm::Id3 sizes[numBlocks] = { { 5, 4, 1 }, { 5, 5, 1 }, { 5, 4, 1 }, { 5, 5, 1 } };
  vtkm::Id3 origins[numBlocks] = { { 0, 0, 0 }, { 0, 3, 0 }, { 4, 0, 0 }, { 4, 3, 0 } };
  vtkm::Id3 blockIndices[numBlocks] = { { 0, 0, 0 }, { 0, 1, 0 }, { 1, 0, 0 }, { 1, 1, 0 } };
  // Expected output: one volume entry per supernode for each block.
  vtkm::cont::ArrayHandle<vtkm::Id> expectedIntrinsicVolume[numBlocks] = {
    vtkm::cont::make_ArrayHandle<vtkm::Id>({ 6, 9, 8, 24, 20, 1, 1 }),
    vtkm::cont::make_ArrayHandle<vtkm::Id>({ 6, 9, 8, 24, 20, 1, 1 }),
    vtkm::cont::make_ArrayHandle<vtkm::Id>({ 6, 9, 8, 24, 20, 1 }),
    vtkm::cont::make_ArrayHandle<vtkm::Id>({ 6, 9, 8, 24, 20, 1, 2 })
  };
  vtkm::cont::ArrayHandle<vtkm::Id> expectedDependentVolume[numBlocks] = {
    vtkm::cont::make_ArrayHandle<vtkm::Id>({ 6, 9, 18, 24, 46, 72, 1 }),
    vtkm::cont::make_ArrayHandle<vtkm::Id>({ 6, 9, 18, 24, 46, 72, 1 }),
    vtkm::cont::make_ArrayHandle<vtkm::Id>({ 6, 9, 18, 24, 46, 72 }),
    vtkm::cont::make_ArrayHandle<vtkm::Id>({ 6, 9, 18, 24, 46, 72, 2 })
  };
  // Load trees
  vtkm::worklet::contourtree_distributed::HierarchicalContourTree<vtkm::FloatDefault>
    hct[numBlocks];
  for (vtkm::Id blockNo = 0; blockNo < numBlocks; ++blockNo)
  {
    LoadHierarchicalContourTree(Testing::DataPath(filenames[blockNo]).c_str(), hct[blockNo]);
#ifdef DEBUG_PRINT
    std::cout << hct[blockNo].DebugPrint("AfterLoad", __FILE__, __LINE__);
#endif
  }
  // Create and add DIY blocks
  auto comm = vtkm::cont::EnvironmentTracker::GetCommunicator();
  vtkm::Id rank = comm.rank();
  vtkmdiy::Master master(comm,
                         1, // Use 1 thread, VTK-m will do the threading
                         -1 // All blocks in memory
  );
  // Set up connectivity
  using RegularDecomposer = vtkmdiy::RegularDecomposer<vtkmdiy::DiscreteBounds>;
  RegularDecomposer::BoolVector shareFace(3, true);
  RegularDecomposer::BoolVector wrap(3, false);
  RegularDecomposer::CoordinateVector ghosts(3, 1);
  RegularDecomposer::DivisionsVector diyDivisions{ 2, 2, 1 }; // HARDCODED FOR TEST
  // The data is flat in z, so decompose in 2D only.
  int numDims = 2;
  vtkmdiy::DiscreteBounds diyBounds(2);
  diyBounds.min[0] = diyBounds.min[1] = 0;
  diyBounds.max[0] = static_cast<int>(globalSize[0]);
  diyBounds.max[1] = static_cast<int>(globalSize[1]);
  RegularDecomposer decomposer(
    numDims, diyBounds, numBlocks, shareFace, wrap, ghosts, diyDivisions);
  // ... coordinates of local blocks: map each block's grid coordinates to its
  // DIY global id.
  std::vector<int> vtkmdiyLocalBlockGids(numBlocks);
  for (vtkm::Id bi = 0; bi < numBlocks; bi++)
  {
    RegularDecomposer::DivisionsVector diyCoords(static_cast<size_t>(numDims));
    auto currentCoords = blockIndices[bi];
    for (vtkm::IdComponent d = 0; d < numDims; ++d)
    {
      diyCoords[d] = static_cast<int>(currentCoords[d]);
    }
    vtkmdiyLocalBlockGids[static_cast<size_t>(bi)] =
      RegularDecomposer::coords_to_gid(diyCoords, diyDivisions);
  }
  // Define which blocks live on which rank so that vtkmdiy can manage them
  vtkmdiy::DynamicAssigner assigner(comm, comm.size(), numBlocks);
  for (vtkm::Id bi = 0; bi < numBlocks; bi++)
  {
    assigner.set_rank(static_cast<int>(rank),
                      static_cast<int>(vtkmdiyLocalBlockGids[static_cast<size_t>(bi)]));
  }
  vtkmdiy::fix_links(master, assigner);
  // NOTE(review): raw new/delete — master does not take ownership here (we
  // delete manually below). If a VTKM_TEST_ASSERT throws first, these leak;
  // tolerable in a test but worth noting.
  vtkm::worklet::contourtree_distributed::HyperSweepBlock<ContourTreeDataFieldType>*
    localHyperSweeperBlocks[numBlocks];
  for (vtkm::Id blockNo = 0; blockNo < numBlocks; ++blockNo)
  {
    localHyperSweeperBlocks[blockNo] =
      new vtkm::worklet::contourtree_distributed::HyperSweepBlock<ContourTreeDataFieldType>(
        blockNo,
        vtkmdiyLocalBlockGids[blockNo],
        origins[blockNo],
        sizes[blockNo],
        globalSize,
        hct[blockNo]);
    master.add(
      vtkmdiyLocalBlockGids[blockNo], localHyperSweeperBlocks[blockNo], new vtkmdiy::Link());
  }
  // Per-block: set up the hyper sweeper, initialize intrinsic vertex counts
  // from the block's mesh, and run the local portion of the hyper sweep.
  master.foreach (
    [](vtkm::worklet::contourtree_distributed::HyperSweepBlock<ContourTreeDataFieldType>* b,
       const vtkmdiy::Master::ProxyWithLink&) {
#ifdef DEBUG_PRINT
      std::cout << "Block " << b->GlobalBlockId << std::endl;
      std::cout << b->HierarchicalContourTree.DebugPrint(
        "Before initializing HyperSweeper", __FILE__, __LINE__);
#endif
      // Create HyperSweeper
      vtkm::worklet::contourtree_distributed::HierarchicalHyperSweeper<vtkm::Id,
                                                                       ContourTreeDataFieldType>
        hyperSweeper(
          b->GlobalBlockId, b->HierarchicalContourTree, b->IntrinsicVolume, b->DependentVolume);
#ifdef DEBUG_PRINT
      std::cout << "Block " << b->GlobalBlockId << std::endl;
      std::cout << b->HierarchicalContourTree.DebugPrint(
        "After initializing HyperSweeper", __FILE__, __LINE__);
#endif
      // Create mesh and initialize vertex counts
      vtkm::worklet::contourtree_augmented::mesh_dem::IdRelabeler idRelabeler{ b->Origin,
                                                                               b->Size,
                                                                               b->GlobalSize };
      // Flat-in-z data uses the 2D mesh; otherwise fall back to 3D.
      if (b->GlobalSize[2] <= 1)
      {
        vtkm::worklet::contourtree_augmented::DataSetMeshTriangulation2DFreudenthal mesh(
          vtkm::Id2{ b->Size[0], b->Size[1] });
        hyperSweeper.InitializeIntrinsicVertexCount(
          b->HierarchicalContourTree, mesh, idRelabeler, b->IntrinsicVolume);
      }
      else
      {
        // TODO/FIXME: For getting owned vertices, it should not make a difference if marching
        // cubes or not. Verify.
        vtkm::worklet::contourtree_augmented::DataSetMeshTriangulation3DFreudenthal mesh(b->Size);
        hyperSweeper.InitializeIntrinsicVertexCount(
          b->HierarchicalContourTree, mesh, idRelabeler, b->IntrinsicVolume);
      }
#ifdef DEBUG_PRINT
      std::cout << "Block " << b->GlobalBlockId << std::endl;
      std::cout << b->HierarchicalContourTree.DebugPrint(
        "After initializing intrinsic vertex count", __FILE__, __LINE__);
#endif
      // Initialize dependentVolume by copy from intrinsicVolume
      vtkm::cont::Algorithm::Copy(b->IntrinsicVolume, b->DependentVolume);
      // Perform the local hypersweep
      hyperSweeper.LocalHyperSweep();
#ifdef DEBUG_PRINT
      std::cout << "Block " << b->GlobalBlockId << std::endl;
      std::cout << b->HierarchicalContourTree.DebugPrint(
        "After local hypersweep", __FILE__, __LINE__);
#endif
    });
  // Reduce
  // partners for merge over regular block grid
  vtkmdiy::RegularSwapPartners partners(
    decomposer, // domain decomposition
    2,          // radix of k-ary reduction.
    true        // contiguous: true=distance doubling, false=distance halving
  );
  // NOTE(review): "Cobmine..." appears to be the actual (misspelled) name of
  // the functor declared in CombineHyperSweepBlockFunctor.h — confirm before
  // "fixing" the spelling here.
  vtkmdiy::reduce(master,
                  assigner,
                  partners,
                  vtkm::worklet::contourtree_distributed::CobmineHyperSweepBlockFunctor<
                    ContourTreeDataFieldType>{});
#ifdef PRINT_RESULT
  // Print the per-block volumes for manual inspection.
  vtkm::Id totalVolume = globalSize[0] * globalSize[1] * globalSize[2];
  master.foreach (
    [&totalVolume](
      vtkm::worklet::contourtree_distributed::HyperSweepBlock<ContourTreeDataFieldType>* b,
      const vtkmdiy::Master::ProxyWithLink&) {
      std::cout << "Block " << b->GlobalBlockId << std::endl;
      std::cout << "=========" << std::endl;
      vtkm::worklet::contourtree_augmented::PrintHeader(b->IntrinsicVolume.GetNumberOfValues(),
                                                        std::cout);
      vtkm::worklet::contourtree_augmented::PrintIndices(
        "Intrinsic Volume", b->IntrinsicVolume, -1, std::cout);
      vtkm::worklet::contourtree_augmented::PrintIndices(
        "Dependent Volume", b->DependentVolume, -1, std::cout);
      std::cout << b->HierarchicalContourTree.DebugPrint(
        "Called from DumpVolumes", __FILE__, __LINE__);
      std::cout << vtkm::worklet::contourtree_distributed::HierarchicalContourTree<
        ContourTreeDataFieldType>::DumpVolumes(b->HierarchicalContourTree.Supernodes,
                                               b->HierarchicalContourTree.Superarcs,
                                               b->HierarchicalContourTree.RegularNodeGlobalIds,
                                               totalVolume,
                                               b->IntrinsicVolume,
                                               b->DependentVolume);
    });
#endif
  // Compare to expected results
  master.foreach (
    [&expectedIntrinsicVolume, &expectedDependentVolume](
      vtkm::worklet::contourtree_distributed::HyperSweepBlock<ContourTreeDataFieldType>* b,
      const vtkmdiy::Master::ProxyWithLink&) {
#ifdef DEBUG_PRINT
      vtkm::worklet::contourtree_augmented::PrintIndices(
        "Intrinsic Volume", b->IntrinsicVolume, -1, std::cout);
      vtkm::worklet::contourtree_augmented::PrintIndices(
        "Expected Intrinsic Volume", expectedIntrinsicVolume[b->GlobalBlockId], -1, std::cout);
      vtkm::worklet::contourtree_augmented::PrintIndices(
        "Dependent Volume", b->DependentVolume, -1, std::cout);
      vtkm::worklet::contourtree_augmented::PrintIndices(
        "Expected Dependent Volume", expectedDependentVolume[b->GlobalBlockId], -1, std::cout);
#endif
      VTKM_TEST_ASSERT(test_equal_portals(expectedIntrinsicVolume[b->GlobalBlockId].ReadPortal(),
                                          b->IntrinsicVolume.ReadPortal()));
      VTKM_TEST_ASSERT(test_equal_portals(expectedDependentVolume[b->GlobalBlockId].ReadPortal(),
                                          b->DependentVolume.ReadPortal()));
    });
  // Clean-up
  for (auto b : localHyperSweeperBlocks)
  {
    delete b;
  }
}
// Driver: run the mesh-combine regression check, then the hyper sweep test.
void TestContourTreeUniformDistributed()
{
  using vtkm::cont::testing::Testing;
  const std::string mesh1Path =
    Testing::DataPath("misc/5x6_7_MC_Rank0_Block0_Round1_BeforeCombineMesh1.ctm");
  const std::string mesh2Path =
    Testing::DataPath("misc/5x6_7_MC_Rank0_Block0_Round1_BeforeCombineMesh2.ctm");
  // NOTE(review): the expected mesh is resolved via RegressionImagePath while
  // the inputs use DataPath — presumably where the baseline file lives; confirm.
  const std::string combinedPath =
    Testing::RegressionImagePath("5x6_7_MC_Rank0_Block0_Round1_CombinedMesh.ctm");
  TestContourTreeMeshCombine<vtkm::FloatDefault>(mesh1Path, mesh2Path, combinedPath);
  TestHierarchicalHyperSweeper();
}
} // anonymous namespace
// Test program entry point: run TestContourTreeUniformDistributed through the
// VTK-m testing harness, which handles argument parsing, exception capture,
// and the process exit code.
int UnitTestContourTreeUniformDistributed(int argc, char* argv[])
{
  return vtkm::cont::testing::Testing::Run(TestContourTreeUniformDistributed, argc, argv);
}