From e9fcd7ca782a7f9dd7b42d42fcf0085a8ae2488c Mon Sep 17 00:00:00 2001 From: oruebel Date: Mon, 13 Dec 2021 03:35:04 -0800 Subject: [PATCH 01/17] Moved ContourTree I/O to new module and added HDF5 I/O --- .../contour_tree_distributed/CMakeLists.txt | 5 +- .../ContourTreeApp.cxx | 564 ++++-------- .../ContourTreeAppDataIO.h | 806 ++++++++++++++++++ 3 files changed, 954 insertions(+), 421 deletions(-) create mode 100644 examples/contour_tree_distributed/ContourTreeAppDataIO.h diff --git a/examples/contour_tree_distributed/CMakeLists.txt b/examples/contour_tree_distributed/CMakeLists.txt index 6c146ac3a..57997fdb6 100644 --- a/examples/contour_tree_distributed/CMakeLists.txt +++ b/examples/contour_tree_distributed/CMakeLists.txt @@ -59,7 +59,7 @@ find_package(VTKm REQUIRED QUIET) # MPI #################################### if (VTKm_ENABLE_MPI) - add_executable(ContourTree_Distributed ContourTreeApp.cxx) + add_executable(ContourTree_Distributed ContourTreeApp.cxx ContourTreeAppDataIO.h) target_link_libraries(ContourTree_Distributed vtkm_filter vtkm_io MPI::MPI_CXX) vtkm_add_target_information(ContourTree_Distributed MODIFY_CUDA_FLAGS @@ -76,6 +76,9 @@ if (VTKm_ENABLE_MPI) if (TARGET vtkm::tbb) target_compile_definitions(ContourTree_Distributed PRIVATE "ENABLE_SET_NUM_THREADS") endif() + if (VTKm_ENABLE_HDF5_IO) + target_compile_definitions(ContourTree_Distributed PRIVATE "ENABLE_HDFIO") + endif () add_executable(TreeCompiler TreeCompilerApp.cxx) target_link_libraries(TreeCompiler vtkm_filter) diff --git a/examples/contour_tree_distributed/ContourTreeApp.cxx b/examples/contour_tree_distributed/ContourTreeApp.cxx index b857a77cd..d51e6f110 100644 --- a/examples/contour_tree_distributed/ContourTreeApp.cxx +++ b/examples/contour_tree_distributed/ContourTreeApp.cxx @@ -71,6 +71,8 @@ #include #include +#include "ContourTreeAppDataIO.h" + #include #include #include @@ -272,6 +274,48 @@ int main(int argc, char* argv[]) } } +#ifdef ENABLE_HDFIO + std::string dataset_name = 
"data"; + if (parser.hasOption("--dataset")) + { + dataset_name = parser.getOption("--dataset"); + } + + vtkm::Id3 blocksPerDimIn(1, 1, size); + if (parser.hasOption("--blocksPerDim")) + { + std::string temp = parser.getOption("--blocksPerDim"); + if (std::count(temp.begin(), temp.end(), ',') != 2) + { + std::cerr << "Invalid --blocksPerDim option. Expected string of the form 'x,y,z' got" << temp + << std::endl; + MPI_Finalize(); + return EXIT_FAILURE; + } + char* tempC = (char*)temp.c_str(); + blocksPerDimIn[0] = std::stoi(std::strtok(tempC, ",")); + blocksPerDimIn[1] = std::stoi(std::strtok(nullptr, ",")); + blocksPerDimIn[2] = std::stoi(std::strtok(nullptr, ",")); + } + + vtkm::Id3 selectSize(-1, -1, -1); + if (parser.hasOption("--selectSize")) + { + std::string temp = parser.getOption("--selectSize"); + if (std::count(temp.begin(), temp.end(), ',') != 2) + { + std::cerr << "Invalid --selectSize option. Expected string of the form 'x,y,z' got" << temp + << std::endl; + MPI_Finalize(); + return EXIT_FAILURE; + } + char* tempC = (char*)temp.c_str(); + selectSize[0] = std::stoi(std::strtok(tempC, ",")); + selectSize[1] = std::stoi(std::strtok(nullptr, ",")); + selectSize[2] = std::stoi(std::strtok(nullptr, ",")); + } +#endif + if (argc < 2 || parser.hasOption("--help") || parser.hasOption("-h")) { if (rank == 0) @@ -307,11 +351,24 @@ int main(int argc, char* argv[]) << " computation (Default=False). " << std::endl; std::cout << "--saveOutputData Save data files with hierarchical tree or volume data" << std::endl; - std::cout << "--numBlocks Number of blocks to use during computation " + std::cout << "--numBlocks Number of blocks to use during computation. (Sngle block " + "ASCII/BOV file reader only)" << "(Default=number of MPI ranks.)" << std::endl; std::cout << "--forwardSummary Forward the summary timings also to the per-rank " << std::endl << " log files. 
Default is to round-robin print the " << std::endl << " summary instead" << std::endl; +#ifdef ENABLE_HDFIO + std::cout << "--dataset Name of the dataset to load (HDF5 reader only)(Default=data)" + << std::endl; + std::cout << "--blocksPerDim Number of blocks to split the data into. This is a string of " + "the form 'x,y,z'." + "(HDF5 reader only)(Default='1,1,#ranks')" + << std::endl; + std::cout + << "--selectSize Size of the subblock to read. This is a string of the form 'x,y,z'." + "(HDF5 reader only)(Default='-1,-1,-1')" + << std::endl; +#endif std::cout << std::endl; } MPI_Finalize(); @@ -334,7 +391,15 @@ int main(int argc, char* argv[]) << computeHierarchicalVolumetricBranchDecomposition << std::endl << " saveOutputData=" << saveOutputData << std::endl << " forwardSummary=" << forwardSummary << std::endl - << " nblocks=" << numBlocks << std::endl); + << " nblocks=" << numBlocks << std::endl +#ifdef ENABLE_HDFIO + << " dataset=" << dataset_name << std::endl + << " blocksPerDim=" << blocksPerDimIn[0] << "," << blocksPerDimIn[1] << "," + << blocksPerDimIn[2] << std::endl + << " selectSize=" << selectSize[0] << "," << selectSize[1] << "," + << selectSize[2] << std::endl +#endif + ); } // Redirect stdout to file if we are using MPI with Debugging @@ -411,430 +476,90 @@ int main(int argc, char* argv[]) auto localBlockSizesPortal = localBlockSizes.WritePortal(); // Read the pre-split data files + bool readOk = true; if (preSplitFiles) { - for (int blockNo = 0; blockNo < blocksPerRank; ++blockNo) - { - // Translate pattern into filename for this block - char block_filename[256]; - snprintf(block_filename, - sizeof(block_filename), - filename.c_str(), - static_cast(rank * blocksPerRank + blockNo)); - std::cout << "Reading file " << block_filename << std::endl; - - // Open file - std::ifstream inFile(block_filename); - if (!inFile.is_open() || inFile.bad()) - { - std::cerr << "Error: Couldn't open file " << block_filename << std::endl; - MPI_Finalize(); - return 
EXIT_FAILURE; - } - - // Read header with dimensions - std::string line; - std::string tag; - vtkm::Id dimVertices; - - getline(inFile, line); - std::istringstream global_extents_stream(line); - global_extents_stream >> tag; - if (tag != "#GLOBAL_EXTENTS") - { - std::cerr << "Error: Expected #GLOBAL_EXTENTS, got " << tag << std::endl; - MPI_Finalize(); - return EXIT_FAILURE; - } - - std::vector global_extents; - while (global_extents_stream >> dimVertices) - global_extents.push_back(dimVertices); - - // Swap dimensions so that they are from fastest to slowest growing - // dims[0] -> col; dims[1] -> row, dims[2] ->slice - std::swap(global_extents[0], global_extents[1]); - - if (blockNo == 0) - { // First block: Set globalSize - globalSize = - vtkm::Id3{ static_cast(global_extents[0]), - static_cast(global_extents[1]), - static_cast(global_extents.size() > 2 ? global_extents[2] : 1) }; - } - else - { // All other blocks: Consistency check of globalSize - if (globalSize != - vtkm::Id3{ static_cast(global_extents[0]), - static_cast(global_extents[1]), - static_cast(global_extents.size() > 2 ? global_extents[2] : 1) }) - { - std::cerr << "Error: Global extents mismatch between blocks!" 
<< std::endl; - MPI_Finalize(); - return EXIT_FAILURE; - } - } - - getline(inFile, line); - std::istringstream offset_stream(line); - offset_stream >> tag; - if (tag != "#OFFSET") - { - std::cerr << "Error: Expected #OFFSET, got " << tag << std::endl; - MPI_Finalize(); - return EXIT_FAILURE; - } - std::vector offset; - while (offset_stream >> dimVertices) - offset.push_back(dimVertices); - // Swap dimensions so that they are from fastest to slowest growing - // dims[0] -> col; dims[1] -> row, dims[2] ->slice - std::swap(offset[0], offset[1]); - - getline(inFile, line); - std::istringstream bpd_stream(line); - bpd_stream >> tag; - if (tag != "#BLOCKS_PER_DIM") - { - std::cerr << "Error: Expected #BLOCKS_PER_DIM, got " << tag << std::endl; - MPI_Finalize(); - return EXIT_FAILURE; - } - std::vector bpd; - while (bpd_stream >> dimVertices) - bpd.push_back(dimVertices); - // Swap dimensions so that they are from fastest to slowest growing - // dims[0] -> col; dims[1] -> row, dims[2] ->slice - std::swap(bpd[0], bpd[1]); - - getline(inFile, line); - std::istringstream blockIndex_stream(line); - blockIndex_stream >> tag; - if (tag != "#BLOCK_INDEX") - { - std::cerr << "Error: Expected #BLOCK_INDEX, got " << tag << std::endl; - MPI_Finalize(); - return EXIT_FAILURE; - } - std::vector blockIndex; - while (blockIndex_stream >> dimVertices) - blockIndex.push_back(dimVertices); - // Swap dimensions so that they are from fastest to slowest growing - // dims[0] -> col; dims[1] -> row, dims[2] ->slice - std::swap(blockIndex[0], blockIndex[1]); - - getline(inFile, line); - std::istringstream linestream(line); - std::vector dims; - while (linestream >> dimVertices) - { - dims.push_back(dimVertices); - } - - if (dims.size() != global_extents.size() || dims.size() != offset.size()) - { - std::cerr << "Error: Dimension mismatch" << std::endl; - MPI_Finalize(); - return EXIT_FAILURE; - } - // Swap dimensions so that they are from fastest to slowest growing - // dims[0] -> col; dims[1] 
-> row, dims[2] ->slice - std::swap(dims[0], dims[1]); - - // Compute the number of vertices, i.e., xdim * ydim * zdim - nDims = static_cast(dims.size()); - std::size_t numVertices = static_cast( - std::accumulate(dims.begin(), dims.end(), std::size_t(1), std::multiplies())); - - // Check for fatal input errors - // Check that the number of dimensiosn is either 2D or 3D - bool invalidNumDimensions = (nDims < 2 || nDims > 3); - // Log any errors if found on rank 0 - VTKM_LOG_IF_S(vtkm::cont::LogLevel::Error, - invalidNumDimensions && (rank == 0), - "The input mesh is " << nDims - << "D. " - "The input data must be either 2D or 3D."); - - // If we found any errors in the setttings than finalize MPI and exit the execution - if (invalidNumDimensions) - { - MPI_Finalize(); - return EXIT_FAILURE; - } - - // Read data - std::vector values(numVertices); - if (filename.compare(filename.length() - 5, 5, ".bdem") == 0) - { - inFile.read(reinterpret_cast(values.data()), - static_cast(numVertices * sizeof(ValueType))); - } - else - { - for (std::size_t vertex = 0; vertex < numVertices; ++vertex) - { - inFile >> values[vertex]; - } - } - - currTime = totalTime.GetElapsedTime(); - dataReadTime = currTime - prevTime; - prevTime = currTime; - - // Create vtk-m data set - vtkm::cont::DataSetBuilderUniform dsb; - vtkm::cont::DataSet ds; - if (nDims == 2) - { - const vtkm::Id2 v_dims{ - static_cast(dims[0]), - static_cast(dims[1]), - }; - const vtkm::Vec v_origin{ static_cast(offset[0]), - static_cast(offset[1]) }; - const vtkm::Vec v_spacing{ 1, 1 }; - ds = dsb.Create(v_dims, v_origin, v_spacing); - } - else - { - VTKM_ASSERT(nDims == 3); - const vtkm::Id3 v_dims{ static_cast(dims[0]), - static_cast(dims[1]), - static_cast(dims[2]) }; - const vtkm::Vec v_origin{ static_cast(offset[0]), - static_cast(offset[1]), - static_cast(offset[2]) }; - vtkm::Vec v_spacing(1, 1, 1); - ds = dsb.Create(v_dims, v_origin, v_spacing); - } - ds.AddPointField("values", values); - // and add to partition 
- useDataSet.AppendPartition(ds); - - localBlockIndicesPortal.Set( - blockNo, - vtkm::Id3{ static_cast(blockIndex[0]), - static_cast(blockIndex[1]), - static_cast(nDims == 3 ? blockIndex[2] : 0) }); - localBlockOriginsPortal.Set(blockNo, - vtkm::Id3{ static_cast(offset[0]), - static_cast(offset[1]), - static_cast(nDims == 3 ? offset[2] : 0) }); - localBlockSizesPortal.Set(blockNo, - vtkm::Id3{ static_cast(dims[0]), - static_cast(dims[1]), - static_cast(nDims == 3 ? dims[2] : 0) }); - - if (blockNo == 0) - { - blocksPerDim = vtkm::Id3{ static_cast(bpd[0]), - static_cast(bpd[1]), - static_cast(nDims == 3 ? bpd[2] : 1) }; - } - } - - // Print the mesh metadata - if (rank == 0) - { - VTKM_LOG_S(exampleLogLevel, - std::endl - << " ---------------- Input Mesh Properties --------------" << std::endl - << " Number of dimensions: " << nDims << std::endl); - } + readOk = readPreSplitFiles( + // inputs + rank, + filename, + blocksPerRank, + // outputs + nDims, + useDataSet, + globalSize, + blocksPerDim, + localBlockIndices, + localBlockOrigins, + localBlockSizes, + // output timers + dataReadTime, + buildDatasetTime); } // Read single-block data and split it for the ranks else { - vtkm::cont::DataSet inDataSet; - // Currently FloatDefualt would be fine, but it could cause problems if we ever - // read binary files here. 
- std::vector values; - std::vector dims; - - // Read BOV data file - if (filename.compare(filename.length() - 3, 3, "bov") == 0) + bool isHDF5 = (0 == filename.compare(filename.length() - 3, 3, ".h5")); + if (isHDF5) { - std::cout << "Reading BOV file" << std::endl; - vtkm::io::BOVDataSetReader reader(filename); - inDataSet = reader.ReadDataSet(); - nDims = 3; - currTime = totalTime.GetElapsedTime(); - dataReadTime = currTime - prevTime; - prevTime = currTime; - // Copy the data into the values array so we can construct a multiblock dataset - // TODO All we should need to do to implement BOV support is to copy the values - // in the values vector and copy the dimensions in the dims vector - vtkm::Id3 pointDimensions; - auto cellSet = inDataSet.GetCellSet(); - vtkm::cont::CastAndCall( - cellSet, vtkm::worklet::contourtree_augmented::GetPointDimensions(), pointDimensions); - std::cout << "Point dimensions are " << pointDimensions << std::endl; - dims.resize(3); - dims[0] = pointDimensions[0]; - dims[1] = pointDimensions[1]; - dims[2] = pointDimensions[2]; - auto tempFieldData = inDataSet.GetField(0).GetData(); - values.resize(static_cast(tempFieldData.GetNumberOfValues())); - auto valuesHandle = vtkm::cont::make_ArrayHandle(values, vtkm::CopyFlag::Off); - vtkm::cont::ArrayCopy(tempFieldData, valuesHandle); - valuesHandle.SyncControlArray(); //Forces values to get updated if copy happened on GPU +#ifdef ENABLE_HDFIO + blocksPerDim = blocksPerDimIn; + readOk = read3DHDF5File( + // inputs + rank, + filename, + dataset_name, + blocksPerRank, + blocksPerDim, + selectSize, + // outputs + nDims, + useDataSet, + globalSize, + localBlockIndices, + localBlockOrigins, + localBlockSizes, + // output timers + dataReadTime, + buildDatasetTime); +#else + VTKM_LOG_S(vtkm::cont::LogLevel::Error, + "Can't read HDF5 file. 
HDF5 reader disabled for this build."); + readOk = false; +#endif } - // Read ASCII data input - else - { - std::cout << "Reading ASCII file" << std::endl; - std::ifstream inFile(filename); - if (inFile.bad()) - return 0; + readOk = readSingleBlockFile( + // inputs + rank, + size, + filename, + numBlocks, + blocksPerRank, + // outputs + nDims, + useDataSet, + globalSize, + blocksPerDim, + localBlockIndices, + localBlockOrigins, + localBlockSizes, + // output timers + dataReadTime, + buildDatasetTime); + } + if (!readOk) + { + MPI_Finalize(); + return EXIT_FAILURE; + } - // Read the dimensions of the mesh, i.e,. number of elementes in x, y, and z - std::string line; - getline(inFile, line); - std::istringstream linestream(line); - vtkm::Id dimVertices; - while (linestream >> dimVertices) - { - dims.push_back(dimVertices); - } - // Swap dimensions so that they are from fastest to slowest growing - // dims[0] -> col; dims[1] -> row, dims[2] ->slice - std::swap(dims[0], dims[1]); - - // Compute the number of vertices, i.e., xdim * ydim * zdim - nDims = static_cast(dims.size()); - std::size_t numVertices = static_cast( - std::accumulate(dims.begin(), dims.end(), std::size_t(1), std::multiplies())); - - // Check the the number of dimensiosn is either 2D or 3D - bool invalidNumDimensions = (nDims < 2 || nDims > 3); - // Log any errors if found on rank 0 - VTKM_LOG_IF_S(vtkm::cont::LogLevel::Error, - invalidNumDimensions && (rank == 0), - "The input mesh is " << nDims << "D. 
The input data must be either 2D or 3D."); - // If we found any errors in the setttings than finalize MPI and exit the execution - if (invalidNumDimensions) - { - MPI_Finalize(); - return EXIT_FAILURE; - } - - // Read data - values.resize(numVertices); - for (std::size_t vertex = 0; vertex < numVertices; ++vertex) - { - inFile >> values[vertex]; - } - - // finish reading the data - inFile.close(); - - currTime = totalTime.GetElapsedTime(); - dataReadTime = currTime - prevTime; - prevTime = currTime; - - } // END ASCII Read - - // Print the mesh metadata - if (rank == 0) - { - VTKM_LOG_S(exampleLogLevel, - std::endl - << " ---------------- Input Mesh Properties --------------" << std::endl - << " Number of dimensions: " << nDims); - } - - // Create a multi-block dataset for multi-block DIY-paralle processing - blocksPerDim = nDims == 3 ? vtkm::Id3(1, 1, numBlocks) - : vtkm::Id3(1, numBlocks, 1); // Decompose the data into - globalSize = nDims == 3 ? vtkm::Id3(static_cast(dims[0]), - static_cast(dims[1]), - static_cast(dims[2])) - : vtkm::Id3(static_cast(dims[0]), - static_cast(dims[1]), - static_cast(1)); - std::cout << blocksPerDim << " " << globalSize << std::endl; - { - vtkm::Id lastDimSize = - (nDims == 2) ? static_cast(dims[1]) : static_cast(dims[2]); - if (size > (lastDimSize / 2.)) - { - VTKM_LOG_IF_S(vtkm::cont::LogLevel::Error, - rank == 0, - "Number of ranks too large for data. Use " << lastDimSize / 2 - << "or fewer ranks"); - MPI_Finalize(); - return EXIT_FAILURE; - } - vtkm::Id standardBlockSize = (vtkm::Id)(lastDimSize / numBlocks); - vtkm::Id blockSize = standardBlockSize; - vtkm::Id blockSliceSize = - nDims == 2 ? 
static_cast(dims[0]) : static_cast((dims[0] * dims[1])); - vtkm::Id blockNumValues = blockSize * blockSliceSize; - - vtkm::Id startBlock = blocksPerRank * rank; - vtkm::Id endBlock = startBlock + blocksPerRank; - for (vtkm::Id blockIndex = startBlock; blockIndex < endBlock; ++blockIndex) - { - vtkm::Id localBlockIndex = blockIndex - startBlock; - vtkm::Id blockStart = blockIndex * blockNumValues; - vtkm::Id blockEnd = blockStart + blockNumValues; - if (blockIndex < (numBlocks - 1)) // add overlap between regions - { - blockEnd += blockSliceSize; - } - else - { - blockEnd = lastDimSize * blockSliceSize; - } - vtkm::Id currBlockSize = (vtkm::Id)((blockEnd - blockStart) / blockSliceSize); - - vtkm::cont::DataSetBuilderUniform dsb; - vtkm::cont::DataSet ds; - - // 2D data - if (nDims == 2) - { - vtkm::Id2 vdims; - vdims[0] = static_cast(dims[0]); - vdims[1] = static_cast(currBlockSize); - vtkm::Vec origin(0, blockIndex * blockSize); - vtkm::Vec spacing(1, 1); - ds = dsb.Create(vdims, origin, spacing); - - localBlockIndicesPortal.Set(localBlockIndex, vtkm::Id3(0, blockIndex, 0)); - localBlockOriginsPortal.Set(localBlockIndex, - vtkm::Id3(0, (blockStart / blockSliceSize), 0)); - localBlockSizesPortal.Set(localBlockIndex, - vtkm::Id3(static_cast(dims[0]), currBlockSize, 0)); - } - // 3D data - else - { - vtkm::Id3 vdims; - vdims[0] = static_cast(dims[0]); - vdims[1] = static_cast(dims[1]); - vdims[2] = static_cast(currBlockSize); - vtkm::Vec origin(0, 0, (blockIndex * blockSize)); - vtkm::Vec spacing(1, 1, 1); - ds = dsb.Create(vdims, origin, spacing); - - localBlockIndicesPortal.Set(localBlockIndex, vtkm::Id3(0, 0, blockIndex)); - localBlockOriginsPortal.Set(localBlockIndex, - vtkm::Id3(0, 0, (blockStart / blockSliceSize))); - localBlockSizesPortal.Set(localBlockIndex, - vtkm::Id3(static_cast(dims[0]), - static_cast(dims[1]), - currBlockSize)); - } - - std::vector subValues((values.begin() + blockStart), - (values.begin() + blockEnd)); - - ds.AddPointField("values", 
subValues); - useDataSet.AppendPartition(ds); - } - } + // Print the mesh metadata + if (rank == 0) + { + VTKM_LOG_S(exampleLogLevel, + std::endl + << " ---------------- Input Mesh Properties --------------" << std::endl + << " Number of dimensions: " << nDims); } // Check if marching cubes is enabled for non 3D data @@ -852,9 +577,8 @@ int main(int argc, char* argv[]) return EXIT_FAILURE; } - currTime = totalTime.GetElapsedTime(); - buildDatasetTime = currTime - prevTime; - prevTime = currTime; + // reset timer after read. the dataReadTime and buildDatasetTime are measured by the read functions + prevTime = totalTime.GetElapsedTime(); // Make sure that all ranks have started up before we start the data read MPI_Barrier(comm); diff --git a/examples/contour_tree_distributed/ContourTreeAppDataIO.h b/examples/contour_tree_distributed/ContourTreeAppDataIO.h new file mode 100644 index 000000000..20c74dba7 --- /dev/null +++ b/examples/contour_tree_distributed/ContourTreeAppDataIO.h @@ -0,0 +1,806 @@ +//============================================================================ +// Copyright (c) Kitware, Inc. +// All rights reserved. +// See LICENSE.txt for details. +// This software is distributed WITHOUT ANY WARRANTY; without even +// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR +// PURPOSE. See the above copyright notice for more information. +// +// Copyright 2014 National Technology & Engineering Solutions of Sandia, LLC (NTESS). +// Copyright 2014 UT-Battelle, LLC. +// Copyright 2014 Los Alamos National Security. +// +// Under the terms of Contract DE-NA0003525 with NTESS, +// the U.S. Government retains certain rights in this software. +// +// Under the terms of Contract DE-AC52-06NA25396 with Los Alamos National +// Laboratory (LANL), the U.S. Government retains certain rights in +// this software. 
+//============================================================================ +// Copyright (c) 2018, The Regents of the University of California, through +// Lawrence Berkeley National Laboratory (subject to receipt of any required approvals +// from the U.S. Dept. of Energy). All rights reserved. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// (1) Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// +// (2) Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// (3) Neither the name of the University of California, Lawrence Berkeley National +// Laboratory, U.S. Dept. of Energy nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +// IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, +// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED +// OF THE POSSIBILITY OF SUCH DAMAGE. 
+// +//============================================================================= +// +// I/O functions used by the ContourTreeApp for data read. +//============================================================================== + +#ifndef vtk_m_examples_ContourTreeAppDataIO_hxx +#define vtk_m_examples_ContourTreeAppDataIO_hxx + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#ifdef ENABLE_HDFIO +#include "H5Cpp.h" +using namespace H5; + + + +/// Convert a 3D index of a cube to rank index +vtkm::Id to1DIndex(vtkm::Id3 idx, vtkm::Id3 dims) +{ + return (idx[2] * dims[0] * dims[1]) + (idx[1] * dims[0]) + idx[0]; +} + +/// Convert the rank index to the index of the cube +vtkm::Id3 to3DIndex(vtkm::Id idx, vtkm::Id3 dims) +{ + vtkm::Id3 res; + res[2] = idx / (dims[0] * dims[1]); + idx -= (res[2] * dims[0] * dims[1]); + res[1] = idx / dims[0]; + res[0] = idx % dims[0]; + return res; +} + + +/// Read data from pre-split ASCII files +/// @param[in] rank The current MPI rank the function is called from +/// @param[in] filename Name of the file with %d as placeholder for the integer index of the block +/// @param[in] dataset_name Name of the dataset in the HDF5 file to read +/// @param[in] blocksPerRank Number of data blocks to process on each rank +/// @param[in] blocksPerDim Number of data blocks to use per dimension +/// @param[in] selectSize Select subset of this size from the dataset. 
Set to (-1,-1,-1) to select the full size +/// @param[out] nDims Number of data dimensions (i.e, 2 or 3) +/// @param[out] useDataSet VTKm partioned dataset to be used with the distributed contour tree filter +/// @param[out] globalSize Global extends of the input mesh (i.e., number of mesh points in each dimension) +/// @param[out] localBlockIndices Array with the (x,y,z) index of each local data block with +/// with respect to blocksPerDim +/// @param[out] localBlockOrigins Array with the (x,y,z) origin (with regard to mesh index) of each +/// local data block +/// @param[out] localBlockSizes Array with the sizes (i.e., extends in number of mesh points) of each +/// local data block +/// @param[out] dataReadTime Time to read the data +/// @param[out] buildDatasetTime Time to construct the VTKm datasets +/// @returns bool indicating whether the read was successful or not +template +bool read3DHDF5File(const int& mpi_rank, + const std::string& filename, + const std::string& dataset_name, + const int& blocksPerRank, + const vtkm::Id3& blocksPerDim, + const vtkm::Id3 selectSize, + std::vector::size_type& nDims, + vtkm::cont::PartitionedDataSet& useDataSet, + vtkm::Id3& globalSize, + vtkm::cont::ArrayHandle& localBlockIndices, + vtkm::cont::ArrayHandle& localBlockOrigins, + vtkm::cont::ArrayHandle& localBlockSizes, + vtkm::Float64& dataReadTime, + vtkm::Float64& buildDatasetTime) +{ + vtkm::cont::Timer totalTime; + totalTime.Start(); + vtkm::Float64 prevTime = 0; + vtkm::Float64 currTime = 0; + + // TODO not supported yet + if (blocksPerRank > 1) + { + VTKM_LOG_S( + vtkm::cont::LogLevel::Error, + "HDF5 reader for ContourTreeDistributed does not support multiple blocks per rank yet"); + return false; + } + vtkm::Id blockNo = 0; // TODO: Update this if we have multiple blocks per rank + + localBlockIndices.Allocate(blocksPerRank); + localBlockOrigins.Allocate(blocksPerRank); + localBlockSizes.Allocate(blocksPerRank); + auto localBlockIndicesPortal = 
localBlockIndices.WritePortal(); + auto localBlockOriginsPortal = localBlockOrigins.WritePortal(); + auto localBlockSizesPortal = localBlockSizes.WritePortal(); + + herr_t status; + // Open the file and the dataset + hid_t file = H5Fopen(filename.c_str(), H5F_ACC_RDONLY, H5P_DEFAULT); + hid_t dataset = H5Dopen(file, dataset_name.c_str(), H5P_DEFAULT); + // Get filespace for rank and dimension + hid_t filespace = H5Dget_space(dataset); + // Get number of dimensions in the file dataspace + nDims = H5Sget_simple_extent_ndims(filespace); + if (nDims != 3) + { + VTKM_LOG_S(vtkm::cont::LogLevel::Error, + "HDF5 reader for ContourTreeDistributed requires 3D dataset"); + return false; + } + hsize_t dims[nDims]; // dataset dimensions + globalSize[0] = selectSize[0] < 0 ? dims[0] : selectSize[0]; + globalSize[1] = selectSize[1] < 0 ? dims[1] : selectSize[1]; + globalSize[2] = selectSize[2] < 0 ? dims[2] : selectSize[2]; + // Define the memory space to read dataset. + hid_t dataspace = H5Dget_space(dataset); + // Read a hyperslap + // define the hyperslap + hsize_t count[2]; // size of the hyperslab in the file + hsize_t offset[3]; // hyperslab offset in the file + + // Compute the origin and count + vtkm::Id3 blockSize(vtkm::Id(globalSize[0] / blocksPerDim[0]), + vtkm::Id(globalSize[1] / blocksPerDim[1]), + vtkm::Id(globalSize[2] / blocksPerDim[2])); + vtkm::Id3 blockIndex = to3DIndex(mpi_rank, blocksPerDim); + // compute the offset and count for the block for this rank + offset[0] = blockSize[0] * blockIndex[0]; + offset[1] = blockSize[1] * blockIndex[1]; + offset[1] = blockSize[2] * blockIndex[2]; + count[0] = blockSize[0]; + count[1] = blockSize[1]; + count[2] = blockSize[2]; + if (offset[0] + count[0] > globalSize[0]) + { + count[0] = globalSize[0] - offset[0]; + } + if (offset[1] + count[1] > globalSize[1]) + { + count[1] = globalSize[1] - offset[1]; + } + if (offset[2] + count[2] > globalSize[2]) + { + count[2] = globalSize[2] - offset[2]; + } + // Select the data in 
the dataset + status = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, offset, NULL, count, NULL); + // Define the memory space for reading + hid_t memspace = H5Screate_simple(nDims, count, NULL); + // Define the hyperslap to read the data into memory + hsize_t offset_out[3]; + offset_out[0] = 0; + offset_out[1] = 0; + offset_out[2] = 0; + status = H5Sselect_hyperslab(memspace, H5S_SELECT_SET, offset_out, NULL, count, NULL); + // Read data from hyperslab in the file into the hyperslab in + std::size_t numVertices = count[0] * count[1] * count[2]; + std::vector values(numVertices); + { + if (H5Tequal(H5Dget_type(dataset), H5T_NATIVE_DOUBLE)) + { + double data_out[count[0]][count[1]][count[2]]; // output buffer + status = H5Dread(dataset, H5T_NATIVE_DOUBLE, memspace, dataspace, H5P_DEFAULT, data_out); + // Copy data to 1D array of the expected ValueType + for (vtkm::Id k = 0; k < globalSize[0]; k++) + { + for (vtkm::Id j = 0; j < globalSize[1]; j++) + { + for (vtkm::Id i = 0; i < globalSize[2]; i++) + { + values[to1DIndex(vtkm::Id3(k, j, i), globalSize)] = ValueType(data_out[k][j][i]); + } + } + } + } + else if (H5Tequal(H5Dget_type(dataset), H5T_NATIVE_INT)) + { + int data_out[count[0]][count[1]][count[2]]; // output buffer + status = H5Dread(dataset, H5T_NATIVE_INT, memspace, dataspace, H5P_DEFAULT, data_out); + // Copy data to 1D array of the expected ValueType + for (vtkm::Id k = 0; k < globalSize[0]; k++) + { + for (vtkm::Id j = 0; j < globalSize[1]; j++) + { + for (vtkm::Id i = 0; i < globalSize[2]; i++) + { + values[to1DIndex(vtkm::Id3(k, j, i), globalSize)] = ValueType(data_out[k][j][i]); + } + } + } + } + else if (H5Tequal(H5Dget_type(dataset), H5T_NATIVE_UCHAR)) + { + unsigned char data_out[count[0]][count[1]][count[2]]; // output buffer + status = H5Dread(dataset, H5T_NATIVE_UCHAR, memspace, dataspace, H5P_DEFAULT, data_out); + // Copy data to 1D array of the expected ValueType + for (vtkm::Id k = 0; k < globalSize[0]; k++) + { + for (vtkm::Id j = 0; j < 
globalSize[1]; j++) + { + for (vtkm::Id i = 0; i < globalSize[2]; i++) + { + values[to1DIndex(vtkm::Id3(k, j, i), globalSize)] = ValueType(data_out[k][j][i]); + } + } + } + } + } + // Release HDF5 resources + H5Sclose(dataspace); + H5Dclose(dataset); + H5Fclose(file); + + // Create vtk-m data set + vtkm::cont::DataSetBuilderUniform dsb; + vtkm::cont::DataSet ds; + VTKM_ASSERT(nDims == 3); + const vtkm::Id3 v_dims{ static_cast(count[0]), + static_cast(count[1]), + static_cast(count[2]) }; + const vtkm::Vec v_origin{ static_cast(offset[0]), + static_cast(offset[1]), + static_cast(offset[2]) }; + vtkm::Vec v_spacing(1, 1, 1); + ds = dsb.Create(v_dims, v_origin, v_spacing); + ds.AddPointField("values", values); + // and add to partition + useDataSet.AppendPartition(ds); + localBlockIndicesPortal.Set(blockNo, blockIndex); + localBlockOriginsPortal.Set(blockNo, + vtkm::Id3{ static_cast(offset[0]), + static_cast(offset[1]), + static_cast(offset[2]) }); + localBlockSizesPortal.Set(blockNo, + vtkm::Id3{ static_cast(count[0]), + static_cast(count[1]), + static_cast(count[2]) }); + + // Finished data read + currTime = totalTime.GetElapsedTime(); + dataReadTime = currTime - prevTime; + prevTime = currTime; + + currTime = totalTime.GetElapsedTime(); + buildDatasetTime = currTime - prevTime; + return false; +} +#endif + + + +/// Read data from pre-split ASCII files +/// @param[in] rank The current MPI rank the function is called from +/// @param[in] filename Name of the file with %d as placeholder for the integer index of the block +/// @param[in] blocksPerRank Number of data blocks to process on each rank +/// @param[out] nDims Number of data dimensions (i.e, 2 or 3) +/// @param[out] useDataSet VTKm partioned dataset to be used with the distributed contour tree filter +/// @param[out] globalSize Global extends of the input mesh (i.e., number of mesh points in each dimension) +/// @param[in] blocksPerDim Number of data blocks used in each data dimension +/// @param[in] 
localBlockIndices Array with the (x,y,z) index of each local data block with +/// with respect to blocksPerDim +/// @param[in] localBlockOrigins Array with the (x,y,z) origin (with regard to mesh index) of each +/// local data block +/// @param[in] localBlockSizes Array with the sizes (i.e., extends in number of mesh points) of each +/// local data block +/// @param[in] dataReadTime Time to read the data +/// @param[in] buildDatasetTime Time to construct the VTKm datasets +/// @returns bool indicating whether the read was successful or not +template +bool readPreSplitFiles(const int& rank, + const std::string& filename, + const int& blocksPerRank, + std::vector::size_type& nDims, + vtkm::cont::PartitionedDataSet& useDataSet, + vtkm::Id3& globalSize, + vtkm::Id3& blocksPerDim, + vtkm::cont::ArrayHandle& localBlockIndices, + vtkm::cont::ArrayHandle& localBlockOrigins, + vtkm::cont::ArrayHandle& localBlockSizes, + vtkm::Float64& dataReadTime, + vtkm::Float64& buildDatasetTime) +{ + vtkm::cont::Timer totalTime; + totalTime.Start(); + vtkm::Float64 prevTime = 0; + vtkm::Float64 currTime = 0; + + localBlockIndices.Allocate(blocksPerRank); + localBlockOrigins.Allocate(blocksPerRank); + localBlockSizes.Allocate(blocksPerRank); + auto localBlockIndicesPortal = localBlockIndices.WritePortal(); + auto localBlockOriginsPortal = localBlockOrigins.WritePortal(); + auto localBlockSizesPortal = localBlockSizes.WritePortal(); + for (int blockNo = 0; blockNo < blocksPerRank; ++blockNo) + { + // Translate pattern into filename for this block + char block_filename[256]; + snprintf(block_filename, + sizeof(block_filename), + filename.c_str(), + static_cast(rank * blocksPerRank + blockNo)); + std::cout << "Reading file " << block_filename << std::endl; + + // Open file + std::ifstream inFile(block_filename); + if (!inFile.is_open() || inFile.bad()) + { + std::cerr << "Error: Couldn't open file " << block_filename << std::endl; + return false; + } + + // Read header with dimensions + 
std::string line; + std::string tag; + vtkm::Id dimVertices; + + getline(inFile, line); + std::istringstream global_extents_stream(line); + global_extents_stream >> tag; + if (tag != "#GLOBAL_EXTENTS") + { + std::cerr << "Error: Expected #GLOBAL_EXTENTS, got " << tag << std::endl; + return false; + } + + std::vector global_extents; + while (global_extents_stream >> dimVertices) + global_extents.push_back(dimVertices); + + // Swap dimensions so that they are from fastest to slowest growing + // dims[0] -> col; dims[1] -> row, dims[2] ->slice + std::swap(global_extents[0], global_extents[1]); + + if (blockNo == 0) + { // First block: Set globalSize + globalSize = + vtkm::Id3{ static_cast(global_extents[0]), + static_cast(global_extents[1]), + static_cast(global_extents.size() > 2 ? global_extents[2] : 1) }; + } + else + { // All other blocks: Consistency check of globalSize + if (globalSize != + vtkm::Id3{ static_cast(global_extents[0]), + static_cast(global_extents[1]), + static_cast(global_extents.size() > 2 ? global_extents[2] : 1) }) + { + std::cerr << "Error: Global extents mismatch between blocks!" 
<< std::endl; + return false; + } + } + + getline(inFile, line); + std::istringstream offset_stream(line); + offset_stream >> tag; + if (tag != "#OFFSET") + { + std::cerr << "Error: Expected #OFFSET, got " << tag << std::endl; + return false; + } + std::vector offset; + while (offset_stream >> dimVertices) + offset.push_back(dimVertices); + // Swap dimensions so that they are from fastest to slowest growing + // dims[0] -> col; dims[1] -> row, dims[2] ->slice + std::swap(offset[0], offset[1]); + + getline(inFile, line); + std::istringstream bpd_stream(line); + bpd_stream >> tag; + if (tag != "#BLOCKS_PER_DIM") + { + std::cerr << "Error: Expected #BLOCKS_PER_DIM, got " << tag << std::endl; + return false; + } + std::vector bpd; + while (bpd_stream >> dimVertices) + bpd.push_back(dimVertices); + // Swap dimensions so that they are from fastest to slowest growing + // dims[0] -> col; dims[1] -> row, dims[2] ->slice + std::swap(bpd[0], bpd[1]); + + getline(inFile, line); + std::istringstream blockIndex_stream(line); + blockIndex_stream >> tag; + if (tag != "#BLOCK_INDEX") + { + std::cerr << "Error: Expected #BLOCK_INDEX, got " << tag << std::endl; + return false; + } + std::vector blockIndex; + while (blockIndex_stream >> dimVertices) + blockIndex.push_back(dimVertices); + // Swap dimensions so that they are from fastest to slowest growing + // dims[0] -> col; dims[1] -> row, dims[2] ->slice + std::swap(blockIndex[0], blockIndex[1]); + + getline(inFile, line); + std::istringstream linestream(line); + std::vector dims; + while (linestream >> dimVertices) + { + dims.push_back(dimVertices); + } + + if (dims.size() != global_extents.size() || dims.size() != offset.size()) + { + std::cerr << "Error: Dimension mismatch" << std::endl; + return false; + } + // Swap dimensions so that they are from fastest to slowest growing + // dims[0] -> col; dims[1] -> row, dims[2] ->slice + std::swap(dims[0], dims[1]); + + // Compute the number of vertices, i.e., xdim * ydim * zdim + nDims 
= static_cast(dims.size()); + std::size_t numVertices = static_cast( + std::accumulate(dims.begin(), dims.end(), std::size_t(1), std::multiplies())); + + // Check for fatal input errors + // Check that the number of dimensiosn is either 2D or 3D + bool invalidNumDimensions = (nDims < 2 || nDims > 3); + // Log any errors if found on rank 0 + VTKM_LOG_IF_S(vtkm::cont::LogLevel::Error, + invalidNumDimensions && (rank == 0), + "The input mesh is " << nDims + << "D. " + "The input data must be either 2D or 3D."); + + // If we found any errors in the setttings than finalize MPI and exit the execution + if (invalidNumDimensions) + { + return false; + } + + // Read data + std::vector values(numVertices); + if (filename.compare(filename.length() - 5, 5, ".bdem") == 0) + { + inFile.read(reinterpret_cast(values.data()), + static_cast(numVertices * sizeof(ValueType))); + } + else + { + for (std::size_t vertex = 0; vertex < numVertices; ++vertex) + { + inFile >> values[vertex]; + } + } + + currTime = totalTime.GetElapsedTime(); + dataReadTime = currTime - prevTime; + prevTime = currTime; + + // Create vtk-m data set + vtkm::cont::DataSetBuilderUniform dsb; + vtkm::cont::DataSet ds; + if (nDims == 2) + { + const vtkm::Id2 v_dims{ + static_cast(dims[0]), + static_cast(dims[1]), + }; + const vtkm::Vec v_origin{ static_cast(offset[0]), + static_cast(offset[1]) }; + const vtkm::Vec v_spacing{ 1, 1 }; + ds = dsb.Create(v_dims, v_origin, v_spacing); + } + else + { + VTKM_ASSERT(nDims == 3); + const vtkm::Id3 v_dims{ static_cast(dims[0]), + static_cast(dims[1]), + static_cast(dims[2]) }; + const vtkm::Vec v_origin{ static_cast(offset[0]), + static_cast(offset[1]), + static_cast(offset[2]) }; + vtkm::Vec v_spacing(1, 1, 1); + ds = dsb.Create(v_dims, v_origin, v_spacing); + } + ds.AddPointField("values", values); + // and add to partition + useDataSet.AppendPartition(ds); + + localBlockIndicesPortal.Set(blockNo, + vtkm::Id3{ static_cast(blockIndex[0]), + static_cast(blockIndex[1]), + 
static_cast(nDims == 3 ? blockIndex[2] : 0) }); + localBlockOriginsPortal.Set(blockNo, + vtkm::Id3{ static_cast(offset[0]), + static_cast(offset[1]), + static_cast(nDims == 3 ? offset[2] : 0) }); + localBlockSizesPortal.Set(blockNo, + vtkm::Id3{ static_cast(dims[0]), + static_cast(dims[1]), + static_cast(nDims == 3 ? dims[2] : 0) }); + + if (blockNo == 0) + { + blocksPerDim = vtkm::Id3{ static_cast(bpd[0]), + static_cast(bpd[1]), + static_cast(nDims == 3 ? bpd[2] : 1) }; + } + } + currTime = totalTime.GetElapsedTime(); + buildDatasetTime = currTime - prevTime; + return true; +} + + +/// Read data from a single file and split the data into blocks across ranks +/// This is a simple implementation that will read the full data on all ranks +/// and then extract only the relevant subblock for that rank. +/// The function support reading from BOV as well from ASCII files +/// +/// @param[in] rank The current MPI rank the function is called from +/// @param[in] size The number of MPI ranks +/// @param[in] filename Name of the file with %d as placeholder for the integer index of the block +/// @param[in] numBlocks Number of blocks to use during computation +/// @param[in] blocksPerRank Number of data blocks to process on each rank +/// @param[out] nDims Number of data dimensions (i.e, 2 or 3) +/// @param[out] useDataSet VTKm partioned dataset to be used with the distributed contour tree filter +/// @param[out] globalSize Global extends of the input mesh (i.e., number of mesh points in each dimension) +/// @param[in] blocksPerDim Number of data blocks used in each data dimension +/// @param[in] localBlockIndices Array with the (x,y,z) index of each local data block with +/// with respect to blocksPerDim +/// @param[in] localBlockOrigins Array with the (x,y,z) origin (with regard to mesh index) of each +/// local data block +/// @param[in] localBlockSizes Array with the sizes (i.e., extends in number of mesh points) of each +/// local data block +/// @param[in] dataReadTime 
Time to read the data +/// @param[in] buildDatasetTime Time to construct the VTKm datasets +/// @returns bool indicating whether the read was successful or not +template +bool readSingleBlockFile(const int& rank, + const int& size, + const std::string& filename, + const int& numBlocks, + const int& blocksPerRank, + std::vector::size_type& nDims, + vtkm::cont::PartitionedDataSet& useDataSet, + vtkm::Id3& globalSize, + vtkm::Id3& blocksPerDim, + vtkm::cont::ArrayHandle& localBlockIndices, + vtkm::cont::ArrayHandle& localBlockOrigins, + vtkm::cont::ArrayHandle& localBlockSizes, + vtkm::Float64& dataReadTime, + vtkm::Float64& buildDatasetTime) +{ + vtkm::cont::Timer totalTime; + totalTime.Start(); + vtkm::Float64 prevTime = 0; + vtkm::Float64 currTime = 0; + + localBlockIndices.Allocate(blocksPerRank); + localBlockOrigins.Allocate(blocksPerRank); + localBlockSizes.Allocate(blocksPerRank); + auto localBlockIndicesPortal = localBlockIndices.WritePortal(); + auto localBlockOriginsPortal = localBlockOrigins.WritePortal(); + auto localBlockSizesPortal = localBlockSizes.WritePortal(); + + vtkm::cont::DataSet inDataSet; + // TODO: Currently FloatDefault would be fine, but it could cause problems if we ever read binary files here. 
+ std::vector values; + std::vector dims; + + // Read BOV data file + if (filename.compare(filename.length() - 3, 3, "bov") == 0) + { + std::cout << "Reading BOV file" << std::endl; + vtkm::io::BOVDataSetReader reader(filename); + inDataSet = reader.ReadDataSet(); + nDims = 3; + currTime = totalTime.GetElapsedTime(); + dataReadTime = currTime - prevTime; + prevTime = currTime; + // Copy the data into the values array so we can construct a multiblock dataset + vtkm::Id3 pointDimensions; + auto cellSet = inDataSet.GetCellSet(); + vtkm::cont::CastAndCall( + cellSet, vtkm::worklet::contourtree_augmented::GetPointDimensions(), pointDimensions); + std::cout << "Point dimensions are " << pointDimensions << std::endl; + dims.resize(3); + dims[0] = pointDimensions[0]; + dims[1] = pointDimensions[1]; + dims[2] = pointDimensions[2]; + auto tempFieldData = inDataSet.GetField(0).GetData(); + values.resize(static_cast(tempFieldData.GetNumberOfValues())); + auto valuesHandle = vtkm::cont::make_ArrayHandle(values, vtkm::CopyFlag::Off); + vtkm::cont::ArrayCopy(tempFieldData, valuesHandle); + valuesHandle.SyncControlArray(); //Forces values to get updated if copy happened on GPU + } + // Read ASCII data input + else + { + std::cout << "Reading ASCII file" << std::endl; + std::ifstream inFile(filename); + if (inFile.bad()) + return 0; + + // Read the dimensions of the mesh, i.e,. 
number of elementes in x, y, and z + std::string line; + getline(inFile, line); + std::istringstream linestream(line); + vtkm::Id dimVertices; + while (linestream >> dimVertices) + { + dims.push_back(dimVertices); + } + // Swap dimensions so that they are from fastest to slowest growing + // dims[0] -> col; dims[1] -> row, dims[2] ->slice + std::swap(dims[0], dims[1]); + + // Compute the number of vertices, i.e., xdim * ydim * zdim + nDims = static_cast(dims.size()); + std::size_t numVertices = static_cast( + std::accumulate(dims.begin(), dims.end(), std::size_t(1), std::multiplies())); + + // Check the the number of dimensiosn is either 2D or 3D + bool invalidNumDimensions = (nDims < 2 || nDims > 3); + // Log any errors if found on rank 0 + VTKM_LOG_IF_S(vtkm::cont::LogLevel::Error, + invalidNumDimensions && (rank == 0), + "The input mesh is " << nDims << "D. The input data must be either 2D or 3D."); + // If we found any errors in the setttings than finalize MPI and exit the execution + if (invalidNumDimensions) + { + return false; + } + + // Read data + values.resize(numVertices); + for (std::size_t vertex = 0; vertex < numVertices; ++vertex) + { + inFile >> values[vertex]; + } + + // finish reading the data + inFile.close(); + + currTime = totalTime.GetElapsedTime(); + dataReadTime = currTime - prevTime; + prevTime = currTime; + + } // END ASCII Read + + // Create a multi-block dataset for multi-block DIY-paralle processing + blocksPerDim = + nDims == 3 ? vtkm::Id3(1, 1, numBlocks) : vtkm::Id3(1, numBlocks, 1); // Decompose the data into + globalSize = nDims == 3 ? vtkm::Id3(static_cast(dims[0]), + static_cast(dims[1]), + static_cast(dims[2])) + : vtkm::Id3(static_cast(dims[0]), + static_cast(dims[1]), + static_cast(1)); + std::cout << blocksPerDim << " " << globalSize << std::endl; + { + vtkm::Id lastDimSize = + (nDims == 2) ? 
static_cast(dims[1]) : static_cast(dims[2]); + if (size > (lastDimSize / 2.)) + { + VTKM_LOG_IF_S(vtkm::cont::LogLevel::Error, + rank == 0, + "Number of ranks too large for data. Use " << lastDimSize / 2 + << "or fewer ranks"); + return false; + } + vtkm::Id standardBlockSize = (vtkm::Id)(lastDimSize / numBlocks); + vtkm::Id blockSize = standardBlockSize; + vtkm::Id blockSliceSize = + nDims == 2 ? static_cast(dims[0]) : static_cast((dims[0] * dims[1])); + vtkm::Id blockNumValues = blockSize * blockSliceSize; + + vtkm::Id startBlock = blocksPerRank * rank; + vtkm::Id endBlock = startBlock + blocksPerRank; + for (vtkm::Id blockIndex = startBlock; blockIndex < endBlock; ++blockIndex) + { + vtkm::Id localBlockIndex = blockIndex - startBlock; + vtkm::Id blockStart = blockIndex * blockNumValues; + vtkm::Id blockEnd = blockStart + blockNumValues; + if (blockIndex < (numBlocks - 1)) // add overlap between regions + { + blockEnd += blockSliceSize; + } + else + { + blockEnd = lastDimSize * blockSliceSize; + } + vtkm::Id currBlockSize = (vtkm::Id)((blockEnd - blockStart) / blockSliceSize); + + vtkm::cont::DataSetBuilderUniform dsb; + vtkm::cont::DataSet ds; + + // 2D data + if (nDims == 2) + { + vtkm::Id2 vdims; + vdims[0] = static_cast(dims[0]); + vdims[1] = static_cast(currBlockSize); + vtkm::Vec origin(0, blockIndex * blockSize); + vtkm::Vec spacing(1, 1); + ds = dsb.Create(vdims, origin, spacing); + + localBlockIndicesPortal.Set(localBlockIndex, vtkm::Id3(0, blockIndex, 0)); + localBlockOriginsPortal.Set(localBlockIndex, + vtkm::Id3(0, (blockStart / blockSliceSize), 0)); + localBlockSizesPortal.Set(localBlockIndex, + vtkm::Id3(static_cast(dims[0]), currBlockSize, 0)); + } + // 3D data + else + { + vtkm::Id3 vdims; + vdims[0] = static_cast(dims[0]); + vdims[1] = static_cast(dims[1]); + vdims[2] = static_cast(currBlockSize); + vtkm::Vec origin(0, 0, (blockIndex * blockSize)); + vtkm::Vec spacing(1, 1, 1); + ds = dsb.Create(vdims, origin, spacing); + + 
localBlockIndicesPortal.Set(localBlockIndex, vtkm::Id3(0, 0, blockIndex)); + localBlockOriginsPortal.Set(localBlockIndex, + vtkm::Id3(0, 0, (blockStart / blockSliceSize))); + localBlockSizesPortal.Set( + localBlockIndex, + vtkm::Id3(static_cast(dims[0]), static_cast(dims[1]), currBlockSize)); + } + + std::vector subValues((values.begin() + blockStart), + (values.begin() + blockEnd)); + + ds.AddPointField("values", subValues); + useDataSet.AppendPartition(ds); + } + } + currTime = totalTime.GetElapsedTime(); + buildDatasetTime = currTime - prevTime; + return true; +} + +#endif From 8d1d713dce576c167395592833c921f03b8b9ebf Mon Sep 17 00:00:00 2001 From: oruebel Date: Thu, 13 Jan 2022 20:00:58 -0800 Subject: [PATCH 02/17] Update split data 3D to also support export to HDF5 --- .../contour_tree_distributed/split_data_3d.py | 133 ++++++++++++------ 1 file changed, 90 insertions(+), 43 deletions(-) diff --git a/examples/contour_tree_distributed/split_data_3d.py b/examples/contour_tree_distributed/split_data_3d.py index fa8942b04..83b311c90 100755 --- a/examples/contour_tree_distributed/split_data_3d.py +++ b/examples/contour_tree_distributed/split_data_3d.py @@ -6,16 +6,24 @@ import sys # For readBOV from functools import reduce import operator +try: + import h5py + USE_HDF = True +except: + USE_HDF = False -# Read a 3D text file from disk into a NumPy array -# ... Plain text def read_file(fn): + """ + Read a 3D plain text file from disk into a NumPy array + """ data = np.fromfile(fn, dtype=float, sep=" ") data = data[3:].reshape((int(data[2]),int(data[0]),int(data[1]))) return data -# ... 
VisItBOV def readBOV(filename): + """ + Read data from a VisIt BOV file + """ with open(filename, 'r') as f: header = dict([(lambda x: (x[0].strip().lower(), x[1].strip()))(l.strip().split(':')) for l in f.readlines()]) if 'data_endian' in header: @@ -32,11 +40,22 @@ def readBOV(filename): return (header['variable'], header['centering'].lower(), np.fromfile(dataname, dtype, count).reshape(tuple(reversed(shape)))) return None -# Save a block from a 3D NumPy array to disk -# Python order is slice, row, col -# TXT file order is row, col, slice -# offset and size are in file order def save_piece(fn, array, offset, n_blocks, block_index, size): + """ + Save a block from a 3D NumPy array to disk. + + Python order is slice, row, col + TXT file order is row, col, slice + offset and size are in file order + + Args: + fn (str): filename + array (np.array) : Array with the full data + offset (tuple) : Tuple of int offsets + n_blocks (tuple) : Tuple of ints with the number of blocks per dimension + block_index (tuple) : Tuple of ints with index of the block + size (tuple) : Tuple of ints with the size of the block in each dimension + """ with open(fn, 'w') as f: perm = [1, 2, 0] f.write('#GLOBAL_EXTENTS ' + ' '.join(map(str, [array.shape[i] for i in perm])) + '\n') @@ -51,51 +70,79 @@ def save_piece(fn, array, offset, n_blocks, block_index, size): np.savetxt(f, array[s, offset[0]:offset[0]+size[0],offset[1]:offset[1]+size[1]], fmt='%.16g') f.write('\n') -# Compute split points for splitting into n blocks def split_points(shape, nblocks): + """ + Compute split points for splitting into n blocks: + + Args: + shape (int): Length of the axis + nblocks (int): Number of blocks to split the axis into + + Return: + List of split points along the axis + """ dx = float(shape-1) / nblocks return [ math.floor(i*dx) for i in range(nblocks)] + [ shape - 1 ] -if len(sys.argv) < 2: - print("Error: Usage split_data_3d.py [| ]", file=sys.stderr) - sys.exit(1) +def save_hdf(filename, data, 
**kwargs): + """ + Save the data to HDF5. + The axes of the data will be transposed and reorded to match the order of save_piece function. -# Parse parameters -in_filename = sys.argv[1] + Args: + filename (str) : Name fo the HDF5 file + data (np.array): 3D array with the data + kwargs (dict) : Dict with keyword arguments for the h5py create_dataset function + """ + f = h5py.File(filename, 'w') + f.create_dataset(name='data', data=np.swapaxes(np.transpose(data), 0, 1), **kwargs) -name, ext = os.path.splitext(in_filename) -#out_filename_pattern = name + '_split_%d.txt' -out_filename_pattern = sys.argv[2] +if __name__ == '__main__': -n_blocks = (2, 2, 2) -if len(sys.argv) > 3: - if len(sys.argv) >= 6: - n_blocks = (int(sys.argv[3]), int(sys.argv[4]), int(sys.argv[5])) + if len(sys.argv) < 2: + print("Error: Usage split_data_3d.py [| ]", file=sys.stderr) + sys.exit(1) + + # Parse parameters + in_filename = sys.argv[1] + + name, ext = os.path.splitext(in_filename) + #out_filename_pattern = name + '_split_%d.txt' + out_filename_pattern = sys.argv[2] + + n_blocks = (2, 2, 2) + if len(sys.argv) > 3: + if len(sys.argv) >= 6: + n_blocks = (int(sys.argv[3]), int(sys.argv[4]), int(sys.argv[5])) + else: + n_blocks = (int(sys.argv[3]), int(sys.argv[3]), int(sys.argv[3])) + + # Read data + if ext == '.bov': + data = readBOV(in_filename)[2] else: - n_blocks = (int(sys.argv[3]), int(sys.argv[3]), int(sys.argv[3])) + data = read_file(in_filename) -# Read data -if ext == '.bov': - data = readBOV(in_filename)[2] -else: - data = read_file(in_filename) + # export to hdf5 as well + if USE_HDF: + save_hdf((out_filename_pattern % 0).replace('.txt', '.h5'), data) -# Python order is slice, row, col -# Compute split points -split_points_s = split_points(data.shape[0], n_blocks[2]) -split_points_r = split_points(data.shape[1], n_blocks[0]) -split_points_c = split_points(data.shape[2], n_blocks[1]) + # Python order is slice, row, col + # Compute split points + split_points_s = 
split_points(data.shape[0], n_blocks[2]) + split_points_r = split_points(data.shape[1], n_blocks[0]) + split_points_c = split_points(data.shape[2], n_blocks[1]) -# Create the file that records the slice values -slice_filename = name + '_slices.txt' + # Create the file that records the slice values + slice_filename = name + '_slices.txt' -# Save blocks -block_no = 0 -for block_index_s, (s_start, s_stop) in enumerate(zip(split_points_s, split_points_s[1:])): - for block_index_r, (r_start, r_stop) in enumerate(zip(split_points_r, split_points_r[1:])): - for block_index_c, (c_start, c_stop) in enumerate(zip(split_points_c, split_points_c[1:])): - n_s = s_stop - s_start + 1 - n_r = r_stop - r_start + 1 - n_c = c_stop - c_start + 1 - save_piece(out_filename_pattern % block_no, data, (r_start, c_start, s_start), n_blocks, (block_index_r, block_index_c, block_index_s), (n_r, n_c, n_s)) - block_no += 1 + # Save blocks + block_no = 0 + for block_index_s, (s_start, s_stop) in enumerate(zip(split_points_s, split_points_s[1:])): + for block_index_r, (r_start, r_stop) in enumerate(zip(split_points_r, split_points_r[1:])): + for block_index_c, (c_start, c_stop) in enumerate(zip(split_points_c, split_points_c[1:])): + n_s = s_stop - s_start + 1 + n_r = r_stop - r_start + 1 + n_c = c_stop - c_start + 1 + save_piece(out_filename_pattern % block_no, data, (r_start, c_start, s_start), n_blocks, (block_index_r, block_index_c, block_index_s), (n_r, n_c, n_s)) + block_no += 1 From ae8de9e25d48f3743bf9464dc82210ec6343c8b9 Mon Sep 17 00:00:00 2001 From: oruebel Date: Thu, 13 Jan 2022 20:01:55 -0800 Subject: [PATCH 03/17] Swap HDF5 dims to match presplit data handling --- .../ContourTreeAppDataIO.h | 126 +++++++++++------- 1 file changed, 79 insertions(+), 47 deletions(-) diff --git a/examples/contour_tree_distributed/ContourTreeAppDataIO.h b/examples/contour_tree_distributed/ContourTreeAppDataIO.h index 20c74dba7..cdc232e0d 100644 --- 
a/examples/contour_tree_distributed/ContourTreeAppDataIO.h +++ b/examples/contour_tree_distributed/ContourTreeAppDataIO.h @@ -83,11 +83,12 @@ using namespace H5; - /// Convert a 3D index of a cube to rank index vtkm::Id to1DIndex(vtkm::Id3 idx, vtkm::Id3 dims) { - return (idx[2] * dims[0] * dims[1]) + (idx[1] * dims[0]) + idx[0]; + // return (idx[2] * dims[0] * dims[1]) + (idx[1] * dims[0]) + idx[0]; + // Swap first and second dimension + return (idx[2] * dims[0] * dims[1]) + (idx[0] * dims[1]) + idx[1]; } /// Convert the rank index to the index of the cube @@ -96,8 +97,9 @@ vtkm::Id3 to3DIndex(vtkm::Id idx, vtkm::Id3 dims) vtkm::Id3 res; res[2] = idx / (dims[0] * dims[1]); idx -= (res[2] * dims[0] * dims[1]); - res[1] = idx / dims[0]; - res[0] = idx % dims[0]; + // Swap index 0 and 1 + res[0] = idx / dims[0]; + res[1] = idx % dims[0]; return res; } @@ -127,7 +129,7 @@ bool read3DHDF5File(const int& mpi_rank, const std::string& dataset_name, const int& blocksPerRank, const vtkm::Id3& blocksPerDim, - const vtkm::Id3 selectSize, + const vtkm::Id3& selectSize, std::vector::size_type& nDims, vtkm::cont::PartitionedDataSet& useDataSet, vtkm::Id3& globalSize, @@ -160,8 +162,14 @@ bool read3DHDF5File(const int& mpi_rank, auto localBlockSizesPortal = localBlockSizes.WritePortal(); herr_t status; + //Set up file access property list with parallel I/O access + //hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS); + //MPI_Comm comm = MPI_COMM_WORLD; + //MPI_Info info = MPI_INFO_NULL; + //H5Pset_fapl_mpio(plist_id, comm, info); + // Open the file and the dataset - hid_t file = H5Fopen(filename.c_str(), H5F_ACC_RDONLY, H5P_DEFAULT); + hid_t file = H5Fopen(filename.c_str(), H5F_ACC_RDONLY, H5P_DEFAULT); // plist_id);// hid_t dataset = H5Dopen(file, dataset_name.c_str(), H5P_DEFAULT); // Get filespace for rank and dimension hid_t filespace = H5Dget_space(dataset); @@ -174,6 +182,7 @@ bool read3DHDF5File(const int& mpi_rank, return false; } hsize_t dims[nDims]; // dataset dimensions + 
status = H5Sget_simple_extent_dims(filespace, dims, NULL); globalSize[0] = selectSize[0] < 0 ? dims[0] : selectSize[0]; globalSize[1] = selectSize[1] < 0 ? dims[1] : selectSize[1]; globalSize[2] = selectSize[2] < 0 ? dims[2] : selectSize[2]; @@ -181,21 +190,37 @@ bool read3DHDF5File(const int& mpi_rank, hid_t dataspace = H5Dget_space(dataset); // Read a hyperslap // define the hyperslap - hsize_t count[2]; // size of the hyperslab in the file + hsize_t count[3]; // size of the hyperslab in the file hsize_t offset[3]; // hyperslab offset in the file - // Compute the origin and count - vtkm::Id3 blockSize(vtkm::Id(globalSize[0] / blocksPerDim[0]), - vtkm::Id(globalSize[1] / blocksPerDim[1]), - vtkm::Id(globalSize[2] / blocksPerDim[2])); + vtkm::Id3 blockSize(std::floor(vtkm::Id(globalSize[0] / blocksPerDim[0])), + std::floor(vtkm::Id(globalSize[1] / blocksPerDim[1])), + std::floor(vtkm::Id(globalSize[2] / blocksPerDim[2]))); vtkm::Id3 blockIndex = to3DIndex(mpi_rank, blocksPerDim); // compute the offset and count for the block for this rank offset[0] = blockSize[0] * blockIndex[0]; offset[1] = blockSize[1] * blockIndex[1]; - offset[1] = blockSize[2] * blockIndex[2]; + offset[2] = blockSize[2] * blockIndex[2]; count[0] = blockSize[0]; count[1] = blockSize[1]; count[2] = blockSize[2]; + // add ghost zone on the left + if (blockIndex[0] > 0) + { + offset[0] = offset[0] - 1; + count[0] = count[0] + 1; + } + if (blockIndex[1] > 0) + { + offset[1] = offset[1] - 1; + count[1] = count[1] + 1; + } + if (blockIndex[2] > 0) + { + offset[2] = offset[2] - 1; + count[2] = count[2] + 1; + } + // Check that we are not running over the end of the dataset if (offset[0] + count[0] > globalSize[0]) { count[0] = globalSize[0] - offset[0]; @@ -208,16 +233,16 @@ bool read3DHDF5File(const int& mpi_rank, { count[2] = globalSize[2] - offset[2]; } - // Select the data in the dataset + blockSize = vtkm::Id3{ static_cast(count[0]), + static_cast(count[1]), + static_cast(count[2]) }; + vtkm::Id3 
blockOrigin = vtkm::Id3{ static_cast(offset[0]), + static_cast(offset[1]), + static_cast(offset[2]) }; + // Define the hyperslap to read the data into memory status = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, offset, NULL, count, NULL); // Define the memory space for reading hid_t memspace = H5Screate_simple(nDims, count, NULL); - // Define the hyperslap to read the data into memory - hsize_t offset_out[3]; - offset_out[0] = 0; - offset_out[1] = 0; - offset_out[2] = 0; - status = H5Sselect_hyperslab(memspace, H5S_SELECT_SET, offset_out, NULL, count, NULL); // Read data from hyperslab in the file into the hyperslab in std::size_t numVertices = count[0] * count[1] * count[2]; std::vector values(numVertices); @@ -227,13 +252,13 @@ bool read3DHDF5File(const int& mpi_rank, double data_out[count[0]][count[1]][count[2]]; // output buffer status = H5Dread(dataset, H5T_NATIVE_DOUBLE, memspace, dataspace, H5P_DEFAULT, data_out); // Copy data to 1D array of the expected ValueType - for (vtkm::Id k = 0; k < globalSize[0]; k++) + for (vtkm::Id k = 0; k < count[0]; k++) { - for (vtkm::Id j = 0; j < globalSize[1]; j++) + for (vtkm::Id j = 0; j < count[1]; j++) { - for (vtkm::Id i = 0; i < globalSize[2]; i++) + for (vtkm::Id i = 0; i < count[2]; i++) { - values[to1DIndex(vtkm::Id3(k, j, i), globalSize)] = ValueType(data_out[k][j][i]); + values[to1DIndex(vtkm::Id3(k, j, i), blockSize)] = ValueType(data_out[k][j][i]); } } } @@ -243,13 +268,13 @@ bool read3DHDF5File(const int& mpi_rank, int data_out[count[0]][count[1]][count[2]]; // output buffer status = H5Dread(dataset, H5T_NATIVE_INT, memspace, dataspace, H5P_DEFAULT, data_out); // Copy data to 1D array of the expected ValueType - for (vtkm::Id k = 0; k < globalSize[0]; k++) + for (vtkm::Id k = 0; k < count[0]; k++) { - for (vtkm::Id j = 0; j < globalSize[1]; j++) + for (vtkm::Id j = 0; j < count[1]; j++) { - for (vtkm::Id i = 0; i < globalSize[2]; i++) + for (vtkm::Id i = 0; i < count[2]; i++) { - 
values[to1DIndex(vtkm::Id3(k, j, i), globalSize)] = ValueType(data_out[k][j][i]); + values[to1DIndex(vtkm::Id3(k, j, i), blockSize)] = ValueType(data_out[k][j][i]); } } } @@ -259,13 +284,13 @@ bool read3DHDF5File(const int& mpi_rank, unsigned char data_out[count[0]][count[1]][count[2]]; // output buffer status = H5Dread(dataset, H5T_NATIVE_UCHAR, memspace, dataspace, H5P_DEFAULT, data_out); // Copy data to 1D array of the expected ValueType - for (vtkm::Id k = 0; k < globalSize[0]; k++) + for (vtkm::Id k = 0; k < count[0]; k++) { - for (vtkm::Id j = 0; j < globalSize[1]; j++) + for (vtkm::Id j = 0; j < count[1]; j++) { - for (vtkm::Id i = 0; i < globalSize[2]; i++) + for (vtkm::Id i = 0; i < count[2]; i++) { - values[to1DIndex(vtkm::Id3(k, j, i), globalSize)] = ValueType(data_out[k][j][i]); + values[to1DIndex(vtkm::Id3(k, j, i), blockSize)] = ValueType(data_out[k][j][i]); } } } @@ -280,27 +305,33 @@ bool read3DHDF5File(const int& mpi_rank, vtkm::cont::DataSetBuilderUniform dsb; vtkm::cont::DataSet ds; VTKM_ASSERT(nDims == 3); - const vtkm::Id3 v_dims{ static_cast(count[0]), - static_cast(count[1]), - static_cast(count[2]) }; - const vtkm::Vec v_origin{ static_cast(offset[0]), - static_cast(offset[1]), + + /*const vtkm::Vec v_origin{ static_cast(offset[0]), + static_cast(offset[1]), + static_cast(offset[2]) };*/ + // Swap first and second dimenion here as well for consistency + const vtkm::Vec v_origin{ static_cast(offset[1]), + static_cast(offset[0]), static_cast(offset[2]) }; + const vtkm::Id3 v_dims{ static_cast(blockSize[1]), + static_cast(blockSize[0]), + static_cast(blockSize[2]) }; vtkm::Vec v_spacing(1, 1, 1); ds = dsb.Create(v_dims, v_origin, v_spacing); ds.AddPointField("values", values); // and add to partition useDataSet.AppendPartition(ds); - localBlockIndicesPortal.Set(blockNo, blockIndex); - localBlockOriginsPortal.Set(blockNo, - vtkm::Id3{ static_cast(offset[0]), - static_cast(offset[1]), - static_cast(offset[2]) }); - 
localBlockSizesPortal.Set(blockNo, - vtkm::Id3{ static_cast(count[0]), - static_cast(count[1]), - static_cast(count[2]) }); - + // Original order + //localBlockIndicesPortal.Set(blockNo, blockIndex); + //localBlockOriginsPortal.Set(blockNo, blockOrigin); + //localBlockSizesPortal.Set(blockNo, blockSize); + localBlockIndicesPortal.Set(blockNo, vtkm::Id3(blockIndex[1], blockIndex[0], blockIndex[2])); + localBlockOriginsPortal.Set(blockNo, vtkm::Id3(blockOrigin[1], blockOrigin[0], blockOrigin[2])); + localBlockSizesPortal.Set(blockNo, vtkm::Id3(blockSize[1], blockSize[0], blockSize[2])); + // Swap dimensions so that they are from fastest to slowest growing + // dims[0] -> col; dims[1] -> row, dims[2] ->slice + // Swap the dimensions to match the pre-split file reader + globalSize = vtkm::Id3(globalSize[1], globalSize[0], globalSize[2]); // Finished data read currTime = totalTime.GetElapsedTime(); dataReadTime = currTime - prevTime; @@ -308,7 +339,7 @@ bool read3DHDF5File(const int& mpi_rank, currTime = totalTime.GetElapsedTime(); buildDatasetTime = currTime - prevTime; - return false; + return true; } #endif @@ -543,6 +574,7 @@ bool readPreSplitFiles(const int& rank, vtkm::Vec v_spacing(1, 1, 1); ds = dsb.Create(v_dims, v_origin, v_spacing); } + ds.AddPointField("values", values); // and add to partition useDataSet.AppendPartition(ds); From da38ff2a61a7956bfd508cafa5159d4c9da97f98 Mon Sep 17 00:00:00 2001 From: oruebel Date: Thu, 13 Jan 2022 20:04:43 -0800 Subject: [PATCH 04/17] Update distributed app --- .../ContourTreeApp.cxx | 46 ++++++++++--------- 1 file changed, 25 insertions(+), 21 deletions(-) diff --git a/examples/contour_tree_distributed/ContourTreeApp.cxx b/examples/contour_tree_distributed/ContourTreeApp.cxx index d51e6f110..1f086033b 100644 --- a/examples/contour_tree_distributed/ContourTreeApp.cxx +++ b/examples/contour_tree_distributed/ContourTreeApp.cxx @@ -393,11 +393,11 @@ int main(int argc, char* argv[]) << " forwardSummary=" << forwardSummary << 
std::endl << " nblocks=" << numBlocks << std::endl #ifdef ENABLE_HDFIO - << " dataset=" << dataset_name << std::endl + << " dataset=" << dataset_name << " (HDF5 only)" << std::endl << " blocksPerDim=" << blocksPerDimIn[0] << "," << blocksPerDimIn[1] << "," - << blocksPerDimIn[2] << std::endl + << blocksPerDimIn[2] << " (HDF5 only)" << std::endl << " selectSize=" << selectSize[0] << "," << selectSize[1] << "," - << selectSize[2] << std::endl + << selectSize[2] << " (HDF5 only)" << std::endl #endif ); } @@ -528,27 +528,31 @@ int main(int argc, char* argv[]) readOk = false; #endif } - readOk = readSingleBlockFile( - // inputs - rank, - size, - filename, - numBlocks, - blocksPerRank, - // outputs - nDims, - useDataSet, - globalSize, - blocksPerDim, - localBlockIndices, - localBlockOrigins, - localBlockSizes, - // output timers - dataReadTime, - buildDatasetTime); + else + { + readOk = readSingleBlockFile( + // inputs + rank, + size, + filename, + numBlocks, + blocksPerRank, + // outputs + nDims, + useDataSet, + globalSize, + blocksPerDim, + localBlockIndices, + localBlockOrigins, + localBlockSizes, + // output timers + dataReadTime, + buildDatasetTime); + } } if (!readOk) { + VTKM_LOG_S(vtkm::cont::LogLevel::Error, "Data read failed."); MPI_Finalize(); return EXIT_FAILURE; } From a30d115789d4b170b7bb6cce54cb1a7f480b02ba Mon Sep 17 00:00:00 2001 From: oruebel Date: Mon, 7 Mar 2022 03:19:52 -0800 Subject: [PATCH 05/17] Use Parallel HDF5 and fix axis swap bug --- .../ContourTreeApp.cxx | 41 ++++++++++++++++- .../ContourTreeAppDataIO.h | 45 ++++++++++++++----- 2 files changed, 74 insertions(+), 12 deletions(-) diff --git a/examples/contour_tree_distributed/ContourTreeApp.cxx b/examples/contour_tree_distributed/ContourTreeApp.cxx index 1f086033b..6a3f683d4 100644 --- a/examples/contour_tree_distributed/ContourTreeApp.cxx +++ b/examples/contour_tree_distributed/ContourTreeApp.cxx @@ -100,7 +100,7 @@ VTKM_THIRDPARTY_POST_INCLUDE #include #include -using ValueType = 
vtkm::Float64; +using ValueType = vtkm::Float32; #define SINGLE_FILE_STDOUT_STDERR @@ -441,6 +441,30 @@ int main(int argc, char* argv[]) return 255; } + VTKM_LOG_S(exampleLogLevel, + std::endl + << " ------------ Settings -----------" << std::endl + << " filename=" << filename << std::endl + << " preSplitFiles=" << preSplitFiles << std::endl + << " device=" << device.GetName() << std::endl + << " mc=" << useMarchingCubes << std::endl + << " useFullBoundary=" << !useBoundaryExtremaOnly << std::endl + << " saveDot=" << saveDotFiles << std::endl + << " saveOutputData=" << saveOutputData << std::endl + << " forwardSummary=" << forwardSummary << std::endl + << " numBlocks=" << numBlocks << std::endl + << " augmentHierarchicalTree=" << augmentHierarchicalTree << std::endl + << " numRanks=" << size << std::endl + << " rank=" << rank << std::endl +#ifdef ENABLE_HDFIO + << " dataset=" << dataset_name << " (HDF5 only)" << std::endl + << " blocksPerDim=" << blocksPerDimIn[0] << "," << blocksPerDimIn[1] << "," + << blocksPerDimIn[2] << " (HDF5 only)" << std::endl + << " selectSize=" << selectSize[0] << "," << selectSize[1] << "," << selectSize[2] + << " (HDF5 only)" << std::endl +#endif + ); + // Measure our time for startup currTime = totalTime.GetElapsedTime(); vtkm::Float64 startUpTime = currTime - prevTime; @@ -505,7 +529,7 @@ int main(int argc, char* argv[]) #ifdef ENABLE_HDFIO blocksPerDim = blocksPerDimIn; readOk = read3DHDF5File( - // inputs + // inputs (blocksPerDim is being modified to swap dimension to fit we re-ordering of dimension) rank, filename, dataset_name, @@ -590,6 +614,19 @@ int main(int argc, char* argv[]) vtkm::Float64 dataReadSyncTime = currTime - prevTime; prevTime = currTime; + // Log information of the (first) local data block + VTKM_LOG_S(vtkm::cont::LogLevel::Info, + "" << std::setw(42) << std::left << "blockSize" + << ":" << localBlockSizesPortal.Get(0) << std::endl + << std::setw(42) << std::left << "blockOrigin=" << 
localBlockOriginsPortal.Get(0) + << std::endl + << std::setw(42) << std::left << "blockIndices=" << localBlockIndicesPortal.Get(0) + << std::endl + << std::setw(42) << std::left << "blocksPerDim=" << blocksPerDim << std::endl + << std::setw(42) << std::left << "globalSize=" << globalSize << std::endl + + ); + // Convert the mesh of values into contour tree, pairs of vertex ids vtkm::filter::ContourTreeUniformDistributed filter(blocksPerDim, globalSize, diff --git a/examples/contour_tree_distributed/ContourTreeAppDataIO.h b/examples/contour_tree_distributed/ContourTreeAppDataIO.h index cdc232e0d..1d08b608b 100644 --- a/examples/contour_tree_distributed/ContourTreeAppDataIO.h +++ b/examples/contour_tree_distributed/ContourTreeAppDataIO.h @@ -79,8 +79,11 @@ #ifdef ENABLE_HDFIO -#include "H5Cpp.h" -using namespace H5; +// #include "H5Cpp.h" +#include "hdf5.h" +//using namespace H5; + +#include /// Convert a 3D index of a cube to rank index @@ -98,8 +101,12 @@ vtkm::Id3 to3DIndex(vtkm::Id idx, vtkm::Id3 dims) res[2] = idx / (dims[0] * dims[1]); idx -= (res[2] * dims[0] * dims[1]); // Swap index 0 and 1 - res[0] = idx / dims[0]; - res[1] = idx % dims[0]; + // res[0] = idx / dims[0]; + // res[1] = idx % dims[0]; + // Don't swap index here, because this function is used with the original + // HDF5 layout and the 3D index is swapped later on + res[1] = idx / dims[0]; + res[0] = idx % dims[0]; return res; } @@ -128,7 +135,7 @@ bool read3DHDF5File(const int& mpi_rank, const std::string& filename, const std::string& dataset_name, const int& blocksPerRank, - const vtkm::Id3& blocksPerDim, + vtkm::Id3& blocksPerDim, const vtkm::Id3& selectSize, std::vector::size_type& nDims, vtkm::cont::PartitionedDataSet& useDataSet, @@ -163,13 +170,14 @@ bool read3DHDF5File(const int& mpi_rank, herr_t status; //Set up file access property list with parallel I/O access - //hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS); - //MPI_Comm comm = MPI_COMM_WORLD; - //MPI_Info info = MPI_INFO_NULL; - 
//H5Pset_fapl_mpio(plist_id, comm, info); + hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS); + MPI_Comm comm = MPI_COMM_WORLD; + MPI_Info info = MPI_INFO_NULL; + H5Pset_fapl_mpio(plist_id, comm, info); // Open the file and the dataset - hid_t file = H5Fopen(filename.c_str(), H5F_ACC_RDONLY, H5P_DEFAULT); // plist_id);// + //hid_t file = H5Fopen(filename.c_str(), H5F_ACC_RDONLY, H5P_DEFAULT); // plist_id);// + hid_t file = H5Fopen(filename.c_str(), H5F_ACC_RDONLY, plist_id); // hid_t dataset = H5Dopen(file, dataset_name.c_str(), H5P_DEFAULT); // Get filespace for rank and dimension hid_t filespace = H5Dget_space(dataset); @@ -192,11 +200,13 @@ bool read3DHDF5File(const int& mpi_rank, // define the hyperslap hsize_t count[3]; // size of the hyperslab in the file hsize_t offset[3]; // hyperslab offset in the file + // Compute the origin and count vtkm::Id3 blockSize(std::floor(vtkm::Id(globalSize[0] / blocksPerDim[0])), std::floor(vtkm::Id(globalSize[1] / blocksPerDim[1])), std::floor(vtkm::Id(globalSize[2] / blocksPerDim[2]))); vtkm::Id3 blockIndex = to3DIndex(mpi_rank, blocksPerDim); + // compute the offset and count for the block for this rank offset[0] = blockSize[0] * blockIndex[0]; offset[1] = blockSize[1] * blockIndex[1]; @@ -239,6 +249,7 @@ bool read3DHDF5File(const int& mpi_rank, vtkm::Id3 blockOrigin = vtkm::Id3{ static_cast(offset[0]), static_cast(offset[1]), static_cast(offset[2]) }; + // Define the hyperslap to read the data into memory status = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, offset, NULL, count, NULL); // Define the memory space for reading @@ -321,6 +332,7 @@ bool read3DHDF5File(const int& mpi_rank, ds.AddPointField("values", values); // and add to partition useDataSet.AppendPartition(ds); + // Original order //localBlockIndicesPortal.Set(blockNo, blockIndex); //localBlockOriginsPortal.Set(blockNo, blockOrigin); @@ -332,6 +344,19 @@ bool read3DHDF5File(const int& mpi_rank, // dims[0] -> col; dims[1] -> row, dims[2] ->slice // Swap the 
dimensions to match the pre-split file reader globalSize = vtkm::Id3(globalSize[1], globalSize[0], globalSize[2]); + // Swap also the blocks per dimension accordingly + blocksPerDim = vtkm::Id3(blocksPerDim[1], blocksPerDim[0], blocksPerDim[2]); + + // Log information of the (first) local data block + VTKM_LOG_S(vtkm::cont::LogLevel::Info, + "" << std::setw(42) << std::left << "blockSize" + << ":" << localBlockSizesPortal.Get(0) << std::endl + << std::setw(42) << std::left << "blockOrigin=" << localBlockOriginsPortal.Get(0) + << std::endl + << std::setw(42) << std::left << "blockIndices=" << localBlockIndicesPortal.Get(0) + << std::endl + << std::setw(42) << std::left << "globalSize=" << globalSize << std::endl); + // Finished data read currTime = totalTime.GetElapsedTime(); dataReadTime = currTime - prevTime; From 5aed80d7d3c1ffd03af47d1ffbe95ab39c0c661c Mon Sep 17 00:00:00 2001 From: oruebel Date: Mon, 14 Mar 2022 15:09:43 -0700 Subject: [PATCH 06/17] Fix HDF5 float data read bug --- .../ContourTreeAppDataIO.h | 21 +++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/examples/contour_tree_distributed/ContourTreeAppDataIO.h b/examples/contour_tree_distributed/ContourTreeAppDataIO.h index 1d08b608b..a1bff80a1 100644 --- a/examples/contour_tree_distributed/ContourTreeAppDataIO.h +++ b/examples/contour_tree_distributed/ContourTreeAppDataIO.h @@ -274,6 +274,22 @@ bool read3DHDF5File(const int& mpi_rank, } } } + else if (H5Tequal(H5Dget_type(dataset), H5T_NATIVE_FLOAT)) + { + float data_out[count[0]][count[1]][count[2]]; // output buffer + status = H5Dread(dataset, H5T_NATIVE_FLOAT, memspace, dataspace, H5P_DEFAULT, data_out); + // Copy data to 1D array of the expected ValueType + for (vtkm::Id k = 0; k < count[0]; k++) + { + for (vtkm::Id j = 0; j < count[1]; j++) + { + for (vtkm::Id i = 0; i < count[2]; i++) + { + values[to1DIndex(vtkm::Id3(k, j, i), blockSize)] = ValueType(data_out[k][j][i]); + } + } + } + } else if (H5Tequal(H5Dget_type(dataset), 
H5T_NATIVE_INT)) { int data_out[count[0]][count[1]][count[2]]; // output buffer @@ -306,6 +322,11 @@ bool read3DHDF5File(const int& mpi_rank, } } } + else + { + VTKM_LOG_S(vtkm::cont::LogLevel::Error, "Data type not supported by the example HDF5 reader"); + throw "Data type not supported by the example HDF5 reader"; + } } // Release HDF5 resources H5Sclose(dataspace); From e076c11a26f44ab408f849d578eb6af2d1affc7c Mon Sep 17 00:00:00 2001 From: "Gunther H. Weber" Date: Fri, 1 Jul 2022 14:24:45 -0700 Subject: [PATCH 07/17] Add missing linking to to vtkmio --- vtkm/io/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vtkm/io/CMakeLists.txt b/vtkm/io/CMakeLists.txt index 88016c537..41f86b147 100644 --- a/vtkm/io/CMakeLists.txt +++ b/vtkm/io/CMakeLists.txt @@ -85,7 +85,7 @@ vtkm_library( target_link_libraries(vtkm_io PUBLIC vtkm_cont PRIVATE vtkm_lodepng) if (VTKm_ENABLE_HDF5_IO) target_include_directories(vtkm_io PRIVATE $) - target_link_libraries(vtkm_io PRIVATE ${HDF5_HL_LIBRARIES}) + target_link_libraries(vtkm_io PRIVATE ${HDF5_HL_LIBRARIES} ${HDF5_LIBRARIES}) endif() add_subdirectory(internal) From 2f488b485d77c5c0f4734c699a618a06c45ffd24 Mon Sep 17 00:00:00 2001 From: "Gunther H. 
Weber" Date: Fri, 1 Jul 2022 14:25:20 -0700 Subject: [PATCH 08/17] Add missing HDF5 include/link definitions distributed contour tree app --- examples/contour_tree_distributed/CMakeLists.txt | 2 ++ 1 file changed, 2 insertions(+) diff --git a/examples/contour_tree_distributed/CMakeLists.txt b/examples/contour_tree_distributed/CMakeLists.txt index 57997fdb6..7fe91ef0e 100644 --- a/examples/contour_tree_distributed/CMakeLists.txt +++ b/examples/contour_tree_distributed/CMakeLists.txt @@ -78,6 +78,8 @@ if (VTKm_ENABLE_MPI) endif() if (VTKm_ENABLE_HDF5_IO) target_compile_definitions(ContourTree_Distributed PRIVATE "ENABLE_HDFIO") + target_include_directories(ContourTree_Distributed PRIVATE ${HDF5_INCLUDE_DIR}) + target_link_libraries(ContourTree_Distributed ${HDF5_LIBRARIES}) endif () add_executable(TreeCompiler TreeCompilerApp.cxx) From 1b1636b6d8de831f967c79fd5d8f4f548d14a1d4 Mon Sep 17 00:00:00 2001 From: "Gunther H. Weber" Date: Fri, 1 Jul 2022 19:00:57 -0400 Subject: [PATCH 09/17] Update includes for distributed contour tree app to filter refactor --- .../contour_tree_distributed/BranchCompilerApp.cxx | 2 +- examples/contour_tree_distributed/ContourTreeApp.cxx | 10 +++++----- .../contour_tree_distributed/ContourTreeAppDataIO.h | 2 +- examples/contour_tree_distributed/TreeCompilerApp.cxx | 2 +- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/examples/contour_tree_distributed/BranchCompilerApp.cxx b/examples/contour_tree_distributed/BranchCompilerApp.cxx index 67be61ec0..b613a508c 100644 --- a/examples/contour_tree_distributed/BranchCompilerApp.cxx +++ b/examples/contour_tree_distributed/BranchCompilerApp.cxx @@ -65,7 +65,7 @@ // //======================================================================================= -#include +#include int main() { // main() diff --git a/examples/contour_tree_distributed/ContourTreeApp.cxx b/examples/contour_tree_distributed/ContourTreeApp.cxx index 6a3f683d4..6dbbe235f 100644 --- 
a/examples/contour_tree_distributed/ContourTreeApp.cxx +++ b/examples/contour_tree_distributed/ContourTreeApp.cxx @@ -76,11 +76,11 @@ #include #include #include -#include -#include -#include -#include -#include +#include +#include +#include +#include +#include // clang-format off VTKM_THIRDPARTY_PRE_INCLUDE diff --git a/examples/contour_tree_distributed/ContourTreeAppDataIO.h b/examples/contour_tree_distributed/ContourTreeAppDataIO.h index a1bff80a1..3662997ee 100644 --- a/examples/contour_tree_distributed/ContourTreeAppDataIO.h +++ b/examples/contour_tree_distributed/ContourTreeAppDataIO.h @@ -65,7 +65,7 @@ #include #include -#include +#include #include #include diff --git a/examples/contour_tree_distributed/TreeCompilerApp.cxx b/examples/contour_tree_distributed/TreeCompilerApp.cxx index 75665010f..319fce902 100644 --- a/examples/contour_tree_distributed/TreeCompilerApp.cxx +++ b/examples/contour_tree_distributed/TreeCompilerApp.cxx @@ -61,7 +61,7 @@ //============================================================================== #include -#include +#include // main routine int main(int argc, char** argv) From a8786cd9782d9b3349989c68560b719b983af228 Mon Sep 17 00:00:00 2001 From: "Gunther H. 
Weber" Date: Fri, 1 Jul 2022 19:01:21 -0400 Subject: [PATCH 10/17] Added missing iomanip include --- .../worklet/contourtree_distributed/BranchCompiler.h | 1 + 1 file changed, 1 insertion(+) diff --git a/vtkm/filter/scalar_topology/worklet/contourtree_distributed/BranchCompiler.h b/vtkm/filter/scalar_topology/worklet/contourtree_distributed/BranchCompiler.h index d874044c6..0b838d3fd 100644 --- a/vtkm/filter/scalar_topology/worklet/contourtree_distributed/BranchCompiler.h +++ b/vtkm/filter/scalar_topology/worklet/contourtree_distributed/BranchCompiler.h @@ -54,6 +54,7 @@ #ifndef _BRANCHCOMPILER_H_ #define _BRANCHCOMPILER_H_ +#include #include #include #include From eda4b1be11eb15a967513ae840186c1b2c09ded2 Mon Sep 17 00:00:00 2001 From: oruebel Date: Wed, 7 Sep 2022 05:11:39 -0700 Subject: [PATCH 11/17] Run branchDecomposition without requiring save and add timer --- .../ContourTreeApp.cxx | 21 ++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/examples/contour_tree_distributed/ContourTreeApp.cxx b/examples/contour_tree_distributed/ContourTreeApp.cxx index 6dbbe235f..56e317fc0 100644 --- a/examples/contour_tree_distributed/ContourTreeApp.cxx +++ b/examples/contour_tree_distributed/ContourTreeApp.cxx @@ -648,22 +648,31 @@ int main(int argc, char* argv[]) vtkm::Float64 computeContourTreeTime = currTime - prevTime; prevTime = currTime; - // Make sure that all ranks have started up before we start the data read + // Record the time to synchronize after the filter has finished MPI_Barrier(comm); currTime = totalTime.GetElapsedTime(); vtkm::Float64 postFilterSyncTime = currTime - prevTime; prevTime = currTime; + // Compute branch decomposition if requested + vtkm::cont::PartitionedDataSet bd_result; + if (computeHierarchicalVolumetricBranchDecomposition) + { + vtkm::filter::scalar_topology::DistributedBranchDecompositionFilter bd_filter( + blocksPerDim, globalSize, localBlockIndices, localBlockOrigins, localBlockSizes); + bd_result = 
bd_filter.Execute(result); + } + currTime = totalTime.GetElapsedTime(); + vtkm::Float64 branchDecompTime = currTime - prevTime; + prevTime = currTime; + + // Save output if (saveOutputData) { if (augmentHierarchicalTree) { if (computeHierarchicalVolumetricBranchDecomposition) { - vtkm::filter::scalar_topology::DistributedBranchDecompositionFilter bd_filter( - blocksPerDim, globalSize, localBlockIndices, localBlockOrigins, localBlockSizes); - auto bd_result = bd_filter.Execute(result); - for (vtkm::Id ds_no = 0; ds_no < result.GetNumberOfPartitions(); ++ds_no) { auto ds = bd_result.GetPartition(ds_no); @@ -780,6 +789,8 @@ int main(int argc, char* argv[]) << ": " << computeContourTreeTime << " seconds" << std::endl << std::setw(42) << std::left << " Post filter Sync" << ": " << postFilterSyncTime << " seconds" << std::endl + << std::setw(42) << std::left << " Branch Decomposition" + << ": " << branchDecompTime << " seconds" << std::endl << std::setw(42) << std::left << " Save Tree Compiler Data" << ": " << saveOutputDataTime << " seconds" << std::endl << std::setw(42) << std::left << " Total Time" From 63ec3f3bcb988bce222053679c728d0408f08c1b Mon Sep 17 00:00:00 2001 From: Oliver Ruebel Date: Tue, 20 Sep 2022 01:22:16 -0700 Subject: [PATCH 12/17] Updated contour tree distributed IO to use CellSetStructured --- .../ContourTreeAppDataIO.h | 999 +++++++++--------- 1 file changed, 489 insertions(+), 510 deletions(-) diff --git a/examples/contour_tree_distributed/ContourTreeAppDataIO.h b/examples/contour_tree_distributed/ContourTreeAppDataIO.h index 3662997ee..eb31ced53 100644 --- a/examples/contour_tree_distributed/ContourTreeAppDataIO.h +++ b/examples/contour_tree_distributed/ContourTreeAppDataIO.h @@ -58,6 +58,7 @@ #include #include #include +#include #include #include #include @@ -123,10 +124,6 @@ vtkm::Id3 to3DIndex(vtkm::Id idx, vtkm::Id3 dims) /// @param[out] globalSize Global extends of the input mesh (i.e., number of mesh points in each dimension) /// @param[out] 
localBlockIndices Array with the (x,y,z) index of each local data block with /// with respect to blocksPerDim -/// @param[out] localBlockOrigins Array with the (x,y,z) origin (with regard to mesh index) of each -/// local data block -/// @param[out] localBlockSizes Array with the sizes (i.e., extends in number of mesh points) of each -/// local data block /// @param[out] dataReadTime Time to read the data /// @param[out] buildDatasetTime Time to construct the VTKm datasets /// @returns bool indicating whether the read was successful or not @@ -141,8 +138,6 @@ bool read3DHDF5File(const int& mpi_rank, vtkm::cont::PartitionedDataSet& useDataSet, vtkm::Id3& globalSize, vtkm::cont::ArrayHandle& localBlockIndices, - vtkm::cont::ArrayHandle& localBlockOrigins, - vtkm::cont::ArrayHandle& localBlockSizes, vtkm::Float64& dataReadTime, vtkm::Float64& buildDatasetTime) { @@ -162,11 +157,7 @@ bool read3DHDF5File(const int& mpi_rank, vtkm::Id blockNo = 0; // TODO: Update this if we have multiple blocks per rank localBlockIndices.Allocate(blocksPerRank); - localBlockOrigins.Allocate(blocksPerRank); - localBlockSizes.Allocate(blocksPerRank); auto localBlockIndicesPortal = localBlockIndices.WritePortal(); - auto localBlockOriginsPortal = localBlockOrigins.WritePortal(); - auto localBlockSizesPortal = localBlockSizes.WritePortal(); herr_t status; //Set up file access property list with parallel I/O access @@ -338,9 +329,13 @@ bool read3DHDF5File(const int& mpi_rank, vtkm::cont::DataSet ds; VTKM_ASSERT(nDims == 3); - /*const vtkm::Vec v_origin{ static_cast(offset[0]), - static_cast(offset[1]), - static_cast(offset[2]) };*/ + // Swap dimensions so that they are from fastest to slowest growing + // dims[0] -> col; dims[1] -> row, dims[2] ->slice + // Swap the dimensions to match the pre-split file reader + globalSize = vtkm::Id3(globalSize[1], globalSize[0], globalSize[2]); + // Swap also the blocks per dimension accordingly + blocksPerDim = vtkm::Id3(blocksPerDim[1], blocksPerDim[0], 
blocksPerDim[2]); + // Swap first and second dimenion here as well for consistency const vtkm::Vec v_origin{ static_cast(offset[1]), static_cast(offset[0]), @@ -350,535 +345,519 @@ bool read3DHDF5File(const int& mpi_rank, static_cast(blockSize[2]) }; vtkm::Vec v_spacing(1, 1, 1); ds = dsb.Create(v_dims, v_origin, v_spacing); - ds.AddPointField("values", values); - // and add to partition - useDataSet.AppendPartition(ds); + vtkm::cont::CellSetStructured<3> cs; + cs.SetPointDimensions(v_dims); + cs.SetGlobalPointDimensions(globalSize); + cs.SetGlobalPointIndexStart(vtkm::Id3{ + v_origin + { + [0], + v_origin {[1], v_origin{ + [2] }); + ds.SetCellSet(cs); - // Original order - //localBlockIndicesPortal.Set(blockNo, blockIndex); - //localBlockOriginsPortal.Set(blockNo, blockOrigin); - //localBlockSizesPortal.Set(blockNo, blockSize); - localBlockIndicesPortal.Set(blockNo, vtkm::Id3(blockIndex[1], blockIndex[0], blockIndex[2])); - localBlockOriginsPortal.Set(blockNo, vtkm::Id3(blockOrigin[1], blockOrigin[0], blockOrigin[2])); - localBlockSizesPortal.Set(blockNo, vtkm::Id3(blockSize[1], blockSize[0], blockSize[2])); - // Swap dimensions so that they are from fastest to slowest growing - // dims[0] -> col; dims[1] -> row, dims[2] ->slice - // Swap the dimensions to match the pre-split file reader - globalSize = vtkm::Id3(globalSize[1], globalSize[0], globalSize[2]); - // Swap also the blocks per dimension accordingly - blocksPerDim = vtkm::Id3(blocksPerDim[1], blocksPerDim[0], blocksPerDim[2]); + ds.AddPointField("values", values); + // and add to partition + useDataSet.AppendPartition(ds); - // Log information of the (first) local data block - VTKM_LOG_S(vtkm::cont::LogLevel::Info, - "" << std::setw(42) << std::left << "blockSize" - << ":" << localBlockSizesPortal.Get(0) << std::endl - << std::setw(42) << std::left << "blockOrigin=" << localBlockOriginsPortal.Get(0) - << std::endl - << std::setw(42) << std::left << "blockIndices=" << localBlockIndicesPortal.Get(0) - << 
std::endl - << std::setw(42) << std::left << "globalSize=" << globalSize << std::endl); + // Swap order to match pre-splot + localBlockIndicesPortal.Set(blockNo, + vtkm::Id3(blockIndex[1], blockIndex[0], blockIndex[2])); - // Finished data read - currTime = totalTime.GetElapsedTime(); - dataReadTime = currTime - prevTime; - prevTime = currTime; + // Log information of the (first) local data block + VTKM_LOG_S(vtkm::cont::LogLevel::Info, + "" << std::setw(42) << std::left << "blockSize" + << ":" << v_dims << std::endl + << std::setw(42) << std::left << "blockOrigin=" << v_origin << std::endl + << std::setw(42) << std::left + << "blockIndices=" << localBlockIndicesPortal.Get(0) << std::endl + << std::setw(42) << std::left << "globalSize=" << globalSize << std::endl); - currTime = totalTime.GetElapsedTime(); - buildDatasetTime = currTime - prevTime; - return true; -} + // Finished data read + currTime = totalTime.GetElapsedTime(); + dataReadTime = currTime - prevTime; + prevTime = currTime; + + currTime = totalTime.GetElapsedTime(); + buildDatasetTime = currTime - prevTime; + return true; + } #endif -/// Read data from pre-split ASCII files -/// @param[in] rank The current MPI rank the function is called from -/// @param[in] filename Name of the file with %d as placeholder for the integer index of the block -/// @param[in] blocksPerRank Number of data blocks to process on each rank -/// @param[out] nDims Number of data dimensions (i.e, 2 or 3) -/// @param[out] useDataSet VTKm partioned dataset to be used with the distributed contour tree filter -/// @param[out] globalSize Global extends of the input mesh (i.e., number of mesh points in each dimension) -/// @param[in] blocksPerDim Number of data blocks used in each data dimension -/// @param[in] localBlockIndices Array with the (x,y,z) index of each local data block with -/// with respect to blocksPerDim -/// @param[in] localBlockOrigins Array with the (x,y,z) origin (with regard to mesh index) of each -/// local data 
block -/// @param[in] localBlockSizes Array with the sizes (i.e., extends in number of mesh points) of each -/// local data block -/// @param[in] dataReadTime Time to read the data -/// @param[in] buildDatasetTime Time to construct the VTKm datasets -/// @returns bool indicating whether the read was successful or not -template -bool readPreSplitFiles(const int& rank, - const std::string& filename, - const int& blocksPerRank, - std::vector::size_type& nDims, - vtkm::cont::PartitionedDataSet& useDataSet, - vtkm::Id3& globalSize, - vtkm::Id3& blocksPerDim, - vtkm::cont::ArrayHandle& localBlockIndices, - vtkm::cont::ArrayHandle& localBlockOrigins, - vtkm::cont::ArrayHandle& localBlockSizes, - vtkm::Float64& dataReadTime, - vtkm::Float64& buildDatasetTime) -{ - vtkm::cont::Timer totalTime; - totalTime.Start(); - vtkm::Float64 prevTime = 0; - vtkm::Float64 currTime = 0; - - localBlockIndices.Allocate(blocksPerRank); - localBlockOrigins.Allocate(blocksPerRank); - localBlockSizes.Allocate(blocksPerRank); - auto localBlockIndicesPortal = localBlockIndices.WritePortal(); - auto localBlockOriginsPortal = localBlockOrigins.WritePortal(); - auto localBlockSizesPortal = localBlockSizes.WritePortal(); - for (int blockNo = 0; blockNo < blocksPerRank; ++blockNo) - { - // Translate pattern into filename for this block - char block_filename[256]; - snprintf(block_filename, - sizeof(block_filename), - filename.c_str(), - static_cast(rank * blocksPerRank + blockNo)); - std::cout << "Reading file " << block_filename << std::endl; - - // Open file - std::ifstream inFile(block_filename); - if (!inFile.is_open() || inFile.bad()) - { - std::cerr << "Error: Couldn't open file " << block_filename << std::endl; - return false; - } - - // Read header with dimensions - std::string line; - std::string tag; - vtkm::Id dimVertices; - - getline(inFile, line); - std::istringstream global_extents_stream(line); - global_extents_stream >> tag; - if (tag != "#GLOBAL_EXTENTS") - { - std::cerr << "Error: 
Expected #GLOBAL_EXTENTS, got " << tag << std::endl; - return false; - } - - std::vector global_extents; - while (global_extents_stream >> dimVertices) - global_extents.push_back(dimVertices); - - // Swap dimensions so that they are from fastest to slowest growing - // dims[0] -> col; dims[1] -> row, dims[2] ->slice - std::swap(global_extents[0], global_extents[1]); - - if (blockNo == 0) - { // First block: Set globalSize - globalSize = - vtkm::Id3{ static_cast(global_extents[0]), - static_cast(global_extents[1]), - static_cast(global_extents.size() > 2 ? global_extents[2] : 1) }; - } - else - { // All other blocks: Consistency check of globalSize - if (globalSize != - vtkm::Id3{ static_cast(global_extents[0]), - static_cast(global_extents[1]), - static_cast(global_extents.size() > 2 ? global_extents[2] : 1) }) + /// Read data from pre-split ASCII files + /// @param[in] rank The current MPI rank the function is called from + /// @param[in] filename Name of the file with %d as placeholder for the integer index of the block + /// @param[in] blocksPerRank Number of data blocks to process on each rank + /// @param[out] nDims Number of data dimensions (i.e, 2 or 3) + /// @param[out] useDataSet VTKm partioned dataset to be used with the distributed contour tree filter + /// @param[out] globalSize Global extends of the input mesh (i.e., number of mesh points in each dimension) + /// @param[in] blocksPerDim Number of data blocks used in each data dimension + /// @param[in] localBlockIndices Array with the (x,y,z) index of each local data block with + /// with respect to blocksPerDim + /// @param[in] dataReadTime Time to read the data + /// @param[in] buildDatasetTime Time to construct the VTKm datasets + /// @returns bool indicating whether the read was successful or not + template + bool readPreSplitFiles(const int& rank, + const std::string& filename, + const int& blocksPerRank, + std::vector::size_type& nDims, + vtkm::cont::PartitionedDataSet& useDataSet, + vtkm::Id3& 
globalSize, + vtkm::Id3& blocksPerDim, + vtkm::cont::ArrayHandle& localBlockIndices, + vtkm::Float64& dataReadTime, + vtkm::Float64& buildDatasetTime) { - std::cerr << "Error: Global extents mismatch between blocks!" << std::endl; - return false; - } - } + vtkm::cont::Timer totalTime; + totalTime.Start(); + vtkm::Float64 prevTime = 0; + vtkm::Float64 currTime = 0; - getline(inFile, line); - std::istringstream offset_stream(line); - offset_stream >> tag; - if (tag != "#OFFSET") - { - std::cerr << "Error: Expected #OFFSET, got " << tag << std::endl; - return false; - } - std::vector offset; - while (offset_stream >> dimVertices) - offset.push_back(dimVertices); - // Swap dimensions so that they are from fastest to slowest growing - // dims[0] -> col; dims[1] -> row, dims[2] ->slice - std::swap(offset[0], offset[1]); + localBlockIndices.Allocate(blocksPerRank); + auto localBlockIndicesPortal = localBlockIndices.WritePortal(); + for (int blockNo = 0; blockNo < blocksPerRank; ++blockNo) + { + // Translate pattern into filename for this block + char block_filename[256]; + snprintf(block_filename, + sizeof(block_filename), + filename.c_str(), + static_cast(rank * blocksPerRank + blockNo)); + std::cout << "Reading file " << block_filename << std::endl; - getline(inFile, line); - std::istringstream bpd_stream(line); - bpd_stream >> tag; - if (tag != "#BLOCKS_PER_DIM") - { - std::cerr << "Error: Expected #BLOCKS_PER_DIM, got " << tag << std::endl; - return false; - } - std::vector bpd; - while (bpd_stream >> dimVertices) - bpd.push_back(dimVertices); - // Swap dimensions so that they are from fastest to slowest growing - // dims[0] -> col; dims[1] -> row, dims[2] ->slice - std::swap(bpd[0], bpd[1]); + // Open file + std::ifstream inFile(block_filename); + if (!inFile.is_open() || inFile.bad()) + { + std::cerr << "Error: Couldn't open file " << block_filename << std::endl; + return false; + } - getline(inFile, line); - std::istringstream blockIndex_stream(line); - 
blockIndex_stream >> tag; - if (tag != "#BLOCK_INDEX") - { - std::cerr << "Error: Expected #BLOCK_INDEX, got " << tag << std::endl; - return false; - } - std::vector blockIndex; - while (blockIndex_stream >> dimVertices) - blockIndex.push_back(dimVertices); - // Swap dimensions so that they are from fastest to slowest growing - // dims[0] -> col; dims[1] -> row, dims[2] ->slice - std::swap(blockIndex[0], blockIndex[1]); + // Read header with dimensions + std::string line; + std::string tag; + vtkm::Id dimVertices; - getline(inFile, line); - std::istringstream linestream(line); - std::vector dims; - while (linestream >> dimVertices) - { - dims.push_back(dimVertices); - } + getline(inFile, line); + std::istringstream global_extents_stream(line); + global_extents_stream >> tag; + if (tag != "#GLOBAL_EXTENTS") + { + std::cerr << "Error: Expected #GLOBAL_EXTENTS, got " << tag << std::endl; + return false; + } - if (dims.size() != global_extents.size() || dims.size() != offset.size()) - { - std::cerr << "Error: Dimension mismatch" << std::endl; - return false; - } - // Swap dimensions so that they are from fastest to slowest growing - // dims[0] -> col; dims[1] -> row, dims[2] ->slice - std::swap(dims[0], dims[1]); + std::vector global_extents; + while (global_extents_stream >> dimVertices) + global_extents.push_back(dimVertices); - // Compute the number of vertices, i.e., xdim * ydim * zdim - nDims = static_cast(dims.size()); - std::size_t numVertices = static_cast( - std::accumulate(dims.begin(), dims.end(), std::size_t(1), std::multiplies())); + // Swap dimensions so that they are from fastest to slowest growing + // dims[0] -> col; dims[1] -> row, dims[2] ->slice + std::swap(global_extents[0], global_extents[1]); - // Check for fatal input errors - // Check that the number of dimensiosn is either 2D or 3D - bool invalidNumDimensions = (nDims < 2 || nDims > 3); - // Log any errors if found on rank 0 - VTKM_LOG_IF_S(vtkm::cont::LogLevel::Error, - invalidNumDimensions 
&& (rank == 0), - "The input mesh is " << nDims - << "D. " - "The input data must be either 2D or 3D."); + if (blockNo == 0) + { // First block: Set globalSize + globalSize = + vtkm::Id3{ static_cast(global_extents[0]), + static_cast(global_extents[1]), + static_cast(global_extents.size() > 2 ? global_extents[2] : 1) }; + } + else + { // All other blocks: Consistency check of globalSize + if (globalSize != + vtkm::Id3{ + static_cast(global_extents[0]), + static_cast(global_extents[1]), + static_cast(global_extents.size() > 2 ? global_extents[2] : 1) }) + { + std::cerr << "Error: Global extents mismatch between blocks!" << std::endl; + return false; + } + } - // If we found any errors in the setttings than finalize MPI and exit the execution - if (invalidNumDimensions) - { - return false; - } + getline(inFile, line); + std::istringstream offset_stream(line); + offset_stream >> tag; + if (tag != "#OFFSET") + { + std::cerr << "Error: Expected #OFFSET, got " << tag << std::endl; + return false; + } + std::vector offset; + while (offset_stream >> dimVertices) + offset.push_back(dimVertices); + // Swap dimensions so that they are from fastest to slowest growing + // dims[0] -> col; dims[1] -> row, dims[2] ->slice + std::swap(offset[0], offset[1]); - // Read data - std::vector values(numVertices); - if (filename.compare(filename.length() - 5, 5, ".bdem") == 0) - { - inFile.read(reinterpret_cast(values.data()), - static_cast(numVertices * sizeof(ValueType))); - } - else - { - for (std::size_t vertex = 0; vertex < numVertices; ++vertex) - { - inFile >> values[vertex]; - } - } + getline(inFile, line); + std::istringstream bpd_stream(line); + bpd_stream >> tag; + if (tag != "#BLOCKS_PER_DIM") + { + std::cerr << "Error: Expected #BLOCKS_PER_DIM, got " << tag << std::endl; + return false; + } + std::vector bpd; + while (bpd_stream >> dimVertices) + bpd.push_back(dimVertices); + // Swap dimensions so that they are from fastest to slowest growing + // dims[0] -> col; dims[1] -> 
row, dims[2] ->slice + std::swap(bpd[0], bpd[1]); - currTime = totalTime.GetElapsedTime(); - dataReadTime = currTime - prevTime; - prevTime = currTime; + getline(inFile, line); + std::istringstream blockIndex_stream(line); + blockIndex_stream >> tag; + if (tag != "#BLOCK_INDEX") + { + std::cerr << "Error: Expected #BLOCK_INDEX, got " << tag << std::endl; + return false; + } + std::vector blockIndex; + while (blockIndex_stream >> dimVertices) + blockIndex.push_back(dimVertices); + // Swap dimensions so that they are from fastest to slowest growing + // dims[0] -> col; dims[1] -> row, dims[2] ->slice + std::swap(blockIndex[0], blockIndex[1]); - // Create vtk-m data set - vtkm::cont::DataSetBuilderUniform dsb; - vtkm::cont::DataSet ds; - if (nDims == 2) - { - const vtkm::Id2 v_dims{ - static_cast(dims[0]), - static_cast(dims[1]), - }; - const vtkm::Vec v_origin{ static_cast(offset[0]), - static_cast(offset[1]) }; - const vtkm::Vec v_spacing{ 1, 1 }; - ds = dsb.Create(v_dims, v_origin, v_spacing); - } - else - { - VTKM_ASSERT(nDims == 3); - const vtkm::Id3 v_dims{ static_cast(dims[0]), - static_cast(dims[1]), - static_cast(dims[2]) }; - const vtkm::Vec v_origin{ static_cast(offset[0]), - static_cast(offset[1]), - static_cast(offset[2]) }; - vtkm::Vec v_spacing(1, 1, 1); - ds = dsb.Create(v_dims, v_origin, v_spacing); - } + getline(inFile, line); + std::istringstream linestream(line); + std::vector dims; + while (linestream >> dimVertices) + { + dims.push_back(dimVertices); + } - ds.AddPointField("values", values); - // and add to partition - useDataSet.AppendPartition(ds); + if (dims.size() != global_extents.size() || dims.size() != offset.size()) + { + std::cerr << "Error: Dimension mismatch" << std::endl; + return false; + } + // Swap dimensions so that they are from fastest to slowest growing + // dims[0] -> col; dims[1] -> row, dims[2] ->slice + std::swap(dims[0], dims[1]); - localBlockIndicesPortal.Set(blockNo, - vtkm::Id3{ static_cast(blockIndex[0]), - 
static_cast(blockIndex[1]), - static_cast(nDims == 3 ? blockIndex[2] : 0) }); - localBlockOriginsPortal.Set(blockNo, - vtkm::Id3{ static_cast(offset[0]), - static_cast(offset[1]), - static_cast(nDims == 3 ? offset[2] : 0) }); - localBlockSizesPortal.Set(blockNo, - vtkm::Id3{ static_cast(dims[0]), - static_cast(dims[1]), - static_cast(nDims == 3 ? dims[2] : 0) }); + // Compute the number of vertices, i.e., xdim * ydim * zdim + nDims = static_cast(dims.size()); + std::size_t numVertices = static_cast(std::accumulate( + dims.begin(), dims.end(), std::size_t(1), std::multiplies())); - if (blockNo == 0) - { - blocksPerDim = vtkm::Id3{ static_cast(bpd[0]), - static_cast(bpd[1]), - static_cast(nDims == 3 ? bpd[2] : 1) }; - } - } - currTime = totalTime.GetElapsedTime(); - buildDatasetTime = currTime - prevTime; - return true; -} + // Check for fatal input errors + // Check that the number of dimensiosn is either 2D or 3D + bool invalidNumDimensions = (nDims < 2 || nDims > 3); + // Log any errors if found on rank 0 + VTKM_LOG_IF_S(vtkm::cont::LogLevel::Error, + invalidNumDimensions && (rank == 0), + "The input mesh is " << nDims + << "D. " + "The input data must be either 2D or 3D."); + // If we found any errors in the setttings than finalize MPI and exit the execution + if (invalidNumDimensions) + { + return false; + } -/// Read data from a single file and split the data into blocks across ranks -/// This is a simple implementation that will read the full data on all ranks -/// and then extract only the relevant subblock for that rank. 
-/// The function support reading from BOV as well from ASCII files -/// -/// @param[in] rank The current MPI rank the function is called from -/// @param[in] size The number of MPI ranks -/// @param[in] filename Name of the file with %d as placeholder for the integer index of the block -/// @param[in] numBlocks Number of blocks to use during computation -/// @param[in] blocksPerRank Number of data blocks to process on each rank -/// @param[out] nDims Number of data dimensions (i.e, 2 or 3) -/// @param[out] useDataSet VTKm partioned dataset to be used with the distributed contour tree filter -/// @param[out] globalSize Global extends of the input mesh (i.e., number of mesh points in each dimension) -/// @param[in] blocksPerDim Number of data blocks used in each data dimension -/// @param[in] localBlockIndices Array with the (x,y,z) index of each local data block with -/// with respect to blocksPerDim -/// @param[in] localBlockOrigins Array with the (x,y,z) origin (with regard to mesh index) of each -/// local data block -/// @param[in] localBlockSizes Array with the sizes (i.e., extends in number of mesh points) of each -/// local data block -/// @param[in] dataReadTime Time to read the data -/// @param[in] buildDatasetTime Time to construct the VTKm datasets -/// @returns bool indicating whether the read was successful or not -template -bool readSingleBlockFile(const int& rank, - const int& size, - const std::string& filename, - const int& numBlocks, - const int& blocksPerRank, - std::vector::size_type& nDims, - vtkm::cont::PartitionedDataSet& useDataSet, - vtkm::Id3& globalSize, - vtkm::Id3& blocksPerDim, - vtkm::cont::ArrayHandle& localBlockIndices, - vtkm::cont::ArrayHandle& localBlockOrigins, - vtkm::cont::ArrayHandle& localBlockSizes, - vtkm::Float64& dataReadTime, - vtkm::Float64& buildDatasetTime) -{ - vtkm::cont::Timer totalTime; - totalTime.Start(); - vtkm::Float64 prevTime = 0; - vtkm::Float64 currTime = 0; + // Read data + std::vector 
values(numVertices); + if (filename.compare(filename.length() - 5, 5, ".bdem") == 0) + { + inFile.read(reinterpret_cast(values.data()), + static_cast(numVertices * sizeof(ValueType))); + } + else + { + for (std::size_t vertex = 0; vertex < numVertices; ++vertex) + { + inFile >> values[vertex]; + } + } - localBlockIndices.Allocate(blocksPerRank); - localBlockOrigins.Allocate(blocksPerRank); - localBlockSizes.Allocate(blocksPerRank); - auto localBlockIndicesPortal = localBlockIndices.WritePortal(); - auto localBlockOriginsPortal = localBlockOrigins.WritePortal(); - auto localBlockSizesPortal = localBlockSizes.WritePortal(); + currTime = totalTime.GetElapsedTime(); + dataReadTime = currTime - prevTime; + prevTime = currTime; - vtkm::cont::DataSet inDataSet; - // TODO: Currently FloatDefault would be fine, but it could cause problems if we ever read binary files here. - std::vector values; - std::vector dims; + // Create vtk-m data set + vtkm::cont::DataSetBuilderUniform dsb; + vtkm::cont::DataSet ds; + if (nDims == 2) + { + const vtkm::Id2 v_dims{ + static_cast(dims[0]), + static_cast(dims[1]), + }; + const vtkm::Vec v_origin{ static_cast(offset[0]), + static_cast(offset[1]) }; + const vtkm::Vec v_spacing{ 1, 1 }; + ds = dsb.Create(v_dims, v_origin, v_spacing); + vtkm::cont::CellSetStructured<2> cs; + cs.SetPointDimensions(v_dims); + cs.SetGlobalPointDimensions(vtkm::Id2{ globalSize[0], globalSize[1] }); + cs.SetGlobalPointIndexStart(vtkm::Id2{ offset[0], offset[1] }); + ds.SetCellSet(cs); + } + else + { + VTKM_ASSERT(nDims == 3); + const vtkm::Id3 v_dims{ static_cast(dims[0]), + static_cast(dims[1]), + static_cast(dims[2]) }; + const vtkm::Vec v_origin{ static_cast(offset[0]), + static_cast(offset[1]), + static_cast(offset[2]) }; + vtkm::Vec v_spacing(1, 1, 1); + ds = dsb.Create(v_dims, v_origin, v_spacing); + vtkm::cont::CellSetStructured<3> cs; + cs.SetPointDimensions(v_dims); + cs.SetGlobalPointDimensions(globalSize); + cs.SetGlobalPointIndexStart(vtkm::Id3{ 
offset[0], offset[1], offset[2] }); + ds.SetCellSet(cs); + } - // Read BOV data file - if (filename.compare(filename.length() - 3, 3, "bov") == 0) - { - std::cout << "Reading BOV file" << std::endl; - vtkm::io::BOVDataSetReader reader(filename); - inDataSet = reader.ReadDataSet(); - nDims = 3; - currTime = totalTime.GetElapsedTime(); - dataReadTime = currTime - prevTime; - prevTime = currTime; - // Copy the data into the values array so we can construct a multiblock dataset - vtkm::Id3 pointDimensions; - auto cellSet = inDataSet.GetCellSet(); - vtkm::cont::CastAndCall( - cellSet, vtkm::worklet::contourtree_augmented::GetPointDimensions(), pointDimensions); - std::cout << "Point dimensions are " << pointDimensions << std::endl; - dims.resize(3); - dims[0] = pointDimensions[0]; - dims[1] = pointDimensions[1]; - dims[2] = pointDimensions[2]; - auto tempFieldData = inDataSet.GetField(0).GetData(); - values.resize(static_cast(tempFieldData.GetNumberOfValues())); - auto valuesHandle = vtkm::cont::make_ArrayHandle(values, vtkm::CopyFlag::Off); - vtkm::cont::ArrayCopy(tempFieldData, valuesHandle); - valuesHandle.SyncControlArray(); //Forces values to get updated if copy happened on GPU - } - // Read ASCII data input - else - { - std::cout << "Reading ASCII file" << std::endl; - std::ifstream inFile(filename); - if (inFile.bad()) - return 0; + ds.AddPointField("values", values); + // and add to partition + useDataSet.AppendPartition(ds); - // Read the dimensions of the mesh, i.e,. 
number of elementes in x, y, and z - std::string line; - getline(inFile, line); - std::istringstream linestream(line); - vtkm::Id dimVertices; - while (linestream >> dimVertices) - { - dims.push_back(dimVertices); - } - // Swap dimensions so that they are from fastest to slowest growing - // dims[0] -> col; dims[1] -> row, dims[2] ->slice - std::swap(dims[0], dims[1]); + localBlockIndicesPortal.Set( + blockNo, + vtkm::Id3{ static_cast(blockIndex[0]), + static_cast(blockIndex[1]), + static_cast(nDims == 3 ? blockIndex[2] : 0) }); - // Compute the number of vertices, i.e., xdim * ydim * zdim - nDims = static_cast(dims.size()); - std::size_t numVertices = static_cast( - std::accumulate(dims.begin(), dims.end(), std::size_t(1), std::multiplies())); - - // Check the the number of dimensiosn is either 2D or 3D - bool invalidNumDimensions = (nDims < 2 || nDims > 3); - // Log any errors if found on rank 0 - VTKM_LOG_IF_S(vtkm::cont::LogLevel::Error, - invalidNumDimensions && (rank == 0), - "The input mesh is " << nDims << "D. The input data must be either 2D or 3D."); - // If we found any errors in the setttings than finalize MPI and exit the execution - if (invalidNumDimensions) - { - return false; - } - - // Read data - values.resize(numVertices); - for (std::size_t vertex = 0; vertex < numVertices; ++vertex) - { - inFile >> values[vertex]; - } - - // finish reading the data - inFile.close(); - - currTime = totalTime.GetElapsedTime(); - dataReadTime = currTime - prevTime; - prevTime = currTime; - - } // END ASCII Read - - // Create a multi-block dataset for multi-block DIY-paralle processing - blocksPerDim = - nDims == 3 ? vtkm::Id3(1, 1, numBlocks) : vtkm::Id3(1, numBlocks, 1); // Decompose the data into - globalSize = nDims == 3 ? 
vtkm::Id3(static_cast(dims[0]), - static_cast(dims[1]), - static_cast(dims[2])) - : vtkm::Id3(static_cast(dims[0]), - static_cast(dims[1]), - static_cast(1)); - std::cout << blocksPerDim << " " << globalSize << std::endl; - { - vtkm::Id lastDimSize = - (nDims == 2) ? static_cast(dims[1]) : static_cast(dims[2]); - if (size > (lastDimSize / 2.)) - { - VTKM_LOG_IF_S(vtkm::cont::LogLevel::Error, - rank == 0, - "Number of ranks too large for data. Use " << lastDimSize / 2 - << "or fewer ranks"); - return false; - } - vtkm::Id standardBlockSize = (vtkm::Id)(lastDimSize / numBlocks); - vtkm::Id blockSize = standardBlockSize; - vtkm::Id blockSliceSize = - nDims == 2 ? static_cast(dims[0]) : static_cast((dims[0] * dims[1])); - vtkm::Id blockNumValues = blockSize * blockSliceSize; - - vtkm::Id startBlock = blocksPerRank * rank; - vtkm::Id endBlock = startBlock + blocksPerRank; - for (vtkm::Id blockIndex = startBlock; blockIndex < endBlock; ++blockIndex) - { - vtkm::Id localBlockIndex = blockIndex - startBlock; - vtkm::Id blockStart = blockIndex * blockNumValues; - vtkm::Id blockEnd = blockStart + blockNumValues; - if (blockIndex < (numBlocks - 1)) // add overlap between regions - { - blockEnd += blockSliceSize; - } - else - { - blockEnd = lastDimSize * blockSliceSize; - } - vtkm::Id currBlockSize = (vtkm::Id)((blockEnd - blockStart) / blockSliceSize); - - vtkm::cont::DataSetBuilderUniform dsb; - vtkm::cont::DataSet ds; - - // 2D data - if (nDims == 2) - { - vtkm::Id2 vdims; - vdims[0] = static_cast(dims[0]); - vdims[1] = static_cast(currBlockSize); - vtkm::Vec origin(0, blockIndex * blockSize); - vtkm::Vec spacing(1, 1); - ds = dsb.Create(vdims, origin, spacing); - - localBlockIndicesPortal.Set(localBlockIndex, vtkm::Id3(0, blockIndex, 0)); - localBlockOriginsPortal.Set(localBlockIndex, - vtkm::Id3(0, (blockStart / blockSliceSize), 0)); - localBlockSizesPortal.Set(localBlockIndex, - vtkm::Id3(static_cast(dims[0]), currBlockSize, 0)); - } - // 3D data - else - { - vtkm::Id3 
vdims; - vdims[0] = static_cast(dims[0]); - vdims[1] = static_cast(dims[1]); - vdims[2] = static_cast(currBlockSize); - vtkm::Vec origin(0, 0, (blockIndex * blockSize)); - vtkm::Vec spacing(1, 1, 1); - ds = dsb.Create(vdims, origin, spacing); - - localBlockIndicesPortal.Set(localBlockIndex, vtkm::Id3(0, 0, blockIndex)); - localBlockOriginsPortal.Set(localBlockIndex, - vtkm::Id3(0, 0, (blockStart / blockSliceSize))); - localBlockSizesPortal.Set( - localBlockIndex, - vtkm::Id3(static_cast(dims[0]), static_cast(dims[1]), currBlockSize)); + if (blockNo == 0) + { + blocksPerDim = vtkm::Id3{ static_cast(bpd[0]), + static_cast(bpd[1]), + static_cast(nDims == 3 ? bpd[2] : 1) }; + } + } + currTime = totalTime.GetElapsedTime(); + buildDatasetTime = currTime - prevTime; + return true; } - std::vector subValues((values.begin() + blockStart), - (values.begin() + blockEnd)); - ds.AddPointField("values", subValues); - useDataSet.AppendPartition(ds); - } - } - currTime = totalTime.GetElapsedTime(); - buildDatasetTime = currTime - prevTime; - return true; -} + /// Read data from a single file and split the data into blocks across ranks + /// This is a simple implementation that will read the full data on all ranks + /// and then extract only the relevant subblock for that rank. 
+ /// The function support reading from BOV as well from ASCII files + /// + /// @param[in] rank The current MPI rank the function is called from + /// @param[in] size The number of MPI ranks + /// @param[in] filename Name of the file with %d as placeholder for the integer index of the block + /// @param[in] numBlocks Number of blocks to use during computation + /// @param[in] blocksPerRank Number of data blocks to process on each rank + /// @param[out] nDims Number of data dimensions (i.e, 2 or 3) + /// @param[out] useDataSet VTKm partioned dataset to be used with the distributed contour tree filter + /// @param[out] globalSize Global extends of the input mesh (i.e., number of mesh points in each dimension) + /// @param[in] blocksPerDim Number of data blocks used in each data dimension + /// @param[in] localBlockIndices Array with the (x,y,z) index of each local data block with + /// with respect to blocksPerDim + /// @param[in] dataReadTime Time to read the data + /// @param[in] buildDatasetTime Time to construct the VTKm datasets + /// @returns bool indicating whether the read was successful or not + template + bool readSingleBlockFile(const int& rank, + const int& size, + const std::string& filename, + const int& numBlocks, + const int& blocksPerRank, + std::vector::size_type& nDims, + vtkm::cont::PartitionedDataSet& useDataSet, + vtkm::Id3& globalSize, + vtkm::Id3& blocksPerDim, + vtkm::cont::ArrayHandle& localBlockIndices, + vtkm::Float64& dataReadTime, + vtkm::Float64& buildDatasetTime) + { + vtkm::cont::Timer totalTime; + totalTime.Start(); + vtkm::Float64 prevTime = 0; + vtkm::Float64 currTime = 0; + + localBlockIndices.Allocate(blocksPerRank); + auto localBlockIndicesPortal = localBlockIndices.WritePortal(); + + vtkm::cont::DataSet inDataSet; + // TODO: Currently FloatDefault would be fine, but it could cause problems if we ever read binary files here. 
+ std::vector values; + std::vector dims; + + // Read BOV data file + if (filename.compare(filename.length() - 3, 3, "bov") == 0) + { + std::cout << "Reading BOV file" << std::endl; + vtkm::io::BOVDataSetReader reader(filename); + inDataSet = reader.ReadDataSet(); + nDims = 3; + currTime = totalTime.GetElapsedTime(); + dataReadTime = currTime - prevTime; + prevTime = currTime; + // Copy the data into the values array so we can construct a multiblock dataset + vtkm::Id3 pointDimensions; + auto cellSet = inDataSet.GetCellSet(); + vtkm::cont::CastAndCall( + cellSet, vtkm::worklet::contourtree_augmented::GetPointDimensions(), pointDimensions); + std::cout << "Point dimensions are " << pointDimensions << std::endl; + dims.resize(3); + dims[0] = pointDimensions[0]; + dims[1] = pointDimensions[1]; + dims[2] = pointDimensions[2]; + auto tempFieldData = inDataSet.GetField(0).GetData(); + values.resize(static_cast(tempFieldData.GetNumberOfValues())); + auto valuesHandle = vtkm::cont::make_ArrayHandle(values, vtkm::CopyFlag::Off); + vtkm::cont::ArrayCopy(tempFieldData, valuesHandle); + valuesHandle.SyncControlArray(); //Forces values to get updated if copy happened on GPU + } + // Read ASCII data input + else + { + std::cout << "Reading ASCII file" << std::endl; + std::ifstream inFile(filename); + if (inFile.bad()) + return 0; + + // Read the dimensions of the mesh, i.e,. 
number of elementes in x, y, and z + std::string line; + getline(inFile, line); + std::istringstream linestream(line); + vtkm::Id dimVertices; + while (linestream >> dimVertices) + { + dims.push_back(dimVertices); + } + // Swap dimensions so that they are from fastest to slowest growing + // dims[0] -> col; dims[1] -> row, dims[2] ->slice + std::swap(dims[0], dims[1]); + + // Compute the number of vertices, i.e., xdim * ydim * zdim + nDims = static_cast(dims.size()); + std::size_t numVertices = static_cast(std::accumulate( + dims.begin(), dims.end(), std::size_t(1), std::multiplies())); + + // Check the the number of dimensiosn is either 2D or 3D + bool invalidNumDimensions = (nDims < 2 || nDims > 3); + // Log any errors if found on rank 0 + VTKM_LOG_IF_S(vtkm::cont::LogLevel::Error, + invalidNumDimensions && (rank == 0), + "The input mesh is " << nDims + << "D. The input data must be either 2D or 3D."); + // If we found any errors in the setttings than finalize MPI and exit the execution + if (invalidNumDimensions) + { + return false; + } + + // Read data + values.resize(numVertices); + for (std::size_t vertex = 0; vertex < numVertices; ++vertex) + { + inFile >> values[vertex]; + } + + // finish reading the data + inFile.close(); + + currTime = totalTime.GetElapsedTime(); + dataReadTime = currTime - prevTime; + prevTime = currTime; + + } // END ASCII Read + + // Create a multi-block dataset for multi-block DIY-paralle processing + blocksPerDim = nDims == 3 ? vtkm::Id3(1, 1, numBlocks) + : vtkm::Id3(1, numBlocks, 1); // Decompose the data into + globalSize = nDims == 3 ? vtkm::Id3(static_cast(dims[0]), + static_cast(dims[1]), + static_cast(dims[2])) + : vtkm::Id3(static_cast(dims[0]), + static_cast(dims[1]), + static_cast(1)); + std::cout << blocksPerDim << " " << globalSize << std::endl; + { + vtkm::Id lastDimSize = + (nDims == 2) ? 
static_cast(dims[1]) : static_cast(dims[2]); + if (size > (lastDimSize / 2.)) + { + VTKM_LOG_IF_S(vtkm::cont::LogLevel::Error, + rank == 0, + "Number of ranks too large for data. Use " << lastDimSize / 2 + << "or fewer ranks"); + return false; + } + vtkm::Id standardBlockSize = (vtkm::Id)(lastDimSize / numBlocks); + vtkm::Id blockSize = standardBlockSize; + vtkm::Id blockSliceSize = nDims == 2 ? static_cast(dims[0]) + : static_cast((dims[0] * dims[1])); + vtkm::Id blockNumValues = blockSize * blockSliceSize; + + vtkm::Id startBlock = blocksPerRank * rank; + vtkm::Id endBlock = startBlock + blocksPerRank; + for (vtkm::Id blockIndex = startBlock; blockIndex < endBlock; ++blockIndex) + { + vtkm::Id localBlockIndex = blockIndex - startBlock; + vtkm::Id blockStart = blockIndex * blockNumValues; + vtkm::Id blockEnd = blockStart + blockNumValues; + if (blockIndex < (numBlocks - 1)) // add overlap between regions + { + blockEnd += blockSliceSize; + } + else + { + blockEnd = lastDimSize * blockSliceSize; + } + vtkm::Id currBlockSize = (vtkm::Id)((blockEnd - blockStart) / blockSliceSize); + + vtkm::cont::DataSetBuilderUniform dsb; + vtkm::cont::DataSet ds; + + // 2D data + if (nDims == 2) + { + vtkm::Id2 vdims; + vdims[0] = static_cast(dims[0]); + vdims[1] = static_cast(currBlockSize); + vtkm::Vec origin(0, blockIndex * blockSize); + vtkm::Vec spacing(1, 1); + ds = dsb.Create(vdims, origin, spacing); + vtkm::cont::CellSetStructured<2> cs; + cs.SetPointDimensions(vdims); + cs.SetGlobalPointDimensions(vtkm::Id2{ globalSize[0], globalSize[1] }); + cs.SetGlobalPointIndexStart(vtkm::Id2{ 0, (blockStart / blockSliceSize) }); + ds.SetCellSet(cs); + localBlockIndicesPortal.Set(localBlockIndex, vtkm::Id3(0, blockIndex, 0)); + } + // 3D data + else + { + vtkm::Id3 vdims; + vdims[0] = static_cast(dims[0]); + vdims[1] = static_cast(dims[1]); + vdims[2] = static_cast(currBlockSize); + vtkm::Vec origin(0, 0, (blockIndex * blockSize)); + vtkm::Vec spacing(1, 1, 1); + ds = dsb.Create(vdims, 
origin, spacing); + vtkm::cont::CellSetStructured<3> cs; + cs.SetPointDimensions(vdims); + cs.SetGlobalPointDimensions(globalSize); + cs.SetGlobalPointIndexStart(vtkm::Id3(0, 0, blockStart / blockSliceSize)); + ds.SetCellSet(cs); + localBlockIndicesPortal.Set(localBlockIndex, vtkm::Id3(0, 0, blockIndex)); + } + + std::vector subValues((values.begin() + blockStart), + (values.begin() + blockEnd)); + + ds.AddPointField("values", subValues); + useDataSet.AppendPartition(ds); + } + } + currTime = totalTime.GetElapsedTime(); + buildDatasetTime = currTime - prevTime; + return true; + } #endif From fbc313186f7e5e68e101e627d453f11656086d48 Mon Sep 17 00:00:00 2001 From: oruebel Date: Tue, 20 Sep 2022 01:42:15 -0700 Subject: [PATCH 13/17] Fix error in ContourTreeAppDataIO --- .../ContourTreeAppDataIO.h | 951 +++++++++--------- 1 file changed, 471 insertions(+), 480 deletions(-) diff --git a/examples/contour_tree_distributed/ContourTreeAppDataIO.h b/examples/contour_tree_distributed/ContourTreeAppDataIO.h index eb31ced53..badbe61d1 100644 --- a/examples/contour_tree_distributed/ContourTreeAppDataIO.h +++ b/examples/contour_tree_distributed/ContourTreeAppDataIO.h @@ -348,516 +348,507 @@ bool read3DHDF5File(const int& mpi_rank, vtkm::cont::CellSetStructured<3> cs; cs.SetPointDimensions(v_dims); cs.SetGlobalPointDimensions(globalSize); - cs.SetGlobalPointIndexStart(vtkm::Id3{ - v_origin + cs.SetGlobalPointIndexStart(vtkm::Id3{ v_origin[0], v_origin[1], v_origin[2] }); + ds.SetCellSet(cs); + + ds.AddPointField("values", values); + // and add to partition + useDataSet.AppendPartition(ds); + + // Swap order to match pre-splot + localBlockIndicesPortal.Set(blockNo, vtkm::Id3(blockIndex[1], blockIndex[0], blockIndex[2])); + + // Log information of the (first) local data block + VTKM_LOG_S(vtkm::cont::LogLevel::Info, + "" << std::setw(42) << std::left << "blockSize" + << ":" << v_dims << std::endl + << std::setw(42) << std::left << "blockOrigin=" << v_origin << std::endl + << 
std::setw(42) << std::left << "blockIndices=" << localBlockIndicesPortal.Get(0) + << std::endl + << std::setw(42) << std::left << "globalSize=" << globalSize << std::endl); + + // Finished data read + currTime = totalTime.GetElapsedTime(); + dataReadTime = currTime - prevTime; + prevTime = currTime; + + currTime = totalTime.GetElapsedTime(); + buildDatasetTime = currTime - prevTime; + return true; +} +#endif + + + +/// Read data from pre-split ASCII files +/// @param[in] rank The current MPI rank the function is called from +/// @param[in] filename Name of the file with %d as placeholder for the integer index of the block +/// @param[in] blocksPerRank Number of data blocks to process on each rank +/// @param[out] nDims Number of data dimensions (i.e, 2 or 3) +/// @param[out] useDataSet VTKm partioned dataset to be used with the distributed contour tree filter +/// @param[out] globalSize Global extends of the input mesh (i.e., number of mesh points in each dimension) +/// @param[in] blocksPerDim Number of data blocks used in each data dimension +/// @param[in] localBlockIndices Array with the (x,y,z) index of each local data block with +/// with respect to blocksPerDim +/// @param[in] dataReadTime Time to read the data +/// @param[in] buildDatasetTime Time to construct the VTKm datasets +/// @returns bool indicating whether the read was successful or not +template +bool readPreSplitFiles(const int& rank, + const std::string& filename, + const int& blocksPerRank, + std::vector::size_type& nDims, + vtkm::cont::PartitionedDataSet& useDataSet, + vtkm::Id3& globalSize, + vtkm::Id3& blocksPerDim, + vtkm::cont::ArrayHandle& localBlockIndices, + vtkm::Float64& dataReadTime, + vtkm::Float64& buildDatasetTime) +{ + vtkm::cont::Timer totalTime; + totalTime.Start(); + vtkm::Float64 prevTime = 0; + vtkm::Float64 currTime = 0; + + localBlockIndices.Allocate(blocksPerRank); + auto localBlockIndicesPortal = localBlockIndices.WritePortal(); + for (int blockNo = 0; blockNo < 
blocksPerRank; ++blockNo) + { + // Translate pattern into filename for this block + char block_filename[256]; + snprintf(block_filename, + sizeof(block_filename), + filename.c_str(), + static_cast(rank * blocksPerRank + blockNo)); + std::cout << "Reading file " << block_filename << std::endl; + + // Open file + std::ifstream inFile(block_filename); + if (!inFile.is_open() || inFile.bad()) { - [0], - v_origin {[1], v_origin{ - [2] }); - ds.SetCellSet(cs); + std::cerr << "Error: Couldn't open file " << block_filename << std::endl; + return false; + } - ds.AddPointField("values", values); - // and add to partition - useDataSet.AppendPartition(ds); + // Read header with dimensions + std::string line; + std::string tag; + vtkm::Id dimVertices; - // Swap order to match pre-splot - localBlockIndicesPortal.Set(blockNo, - vtkm::Id3(blockIndex[1], blockIndex[0], blockIndex[2])); + getline(inFile, line); + std::istringstream global_extents_stream(line); + global_extents_stream >> tag; + if (tag != "#GLOBAL_EXTENTS") + { + std::cerr << "Error: Expected #GLOBAL_EXTENTS, got " << tag << std::endl; + return false; + } - // Log information of the (first) local data block - VTKM_LOG_S(vtkm::cont::LogLevel::Info, - "" << std::setw(42) << std::left << "blockSize" - << ":" << v_dims << std::endl - << std::setw(42) << std::left << "blockOrigin=" << v_origin << std::endl - << std::setw(42) << std::left - << "blockIndices=" << localBlockIndicesPortal.Get(0) << std::endl - << std::setw(42) << std::left << "globalSize=" << globalSize << std::endl); + std::vector global_extents; + while (global_extents_stream >> dimVertices) + global_extents.push_back(dimVertices); - // Finished data read - currTime = totalTime.GetElapsedTime(); - dataReadTime = currTime - prevTime; - prevTime = currTime; + // Swap dimensions so that they are from fastest to slowest growing + // dims[0] -> col; dims[1] -> row, dims[2] ->slice + std::swap(global_extents[0], global_extents[1]); - currTime = 
totalTime.GetElapsedTime(); - buildDatasetTime = currTime - prevTime; - return true; - } -#endif - - - - /// Read data from pre-split ASCII files - /// @param[in] rank The current MPI rank the function is called from - /// @param[in] filename Name of the file with %d as placeholder for the integer index of the block - /// @param[in] blocksPerRank Number of data blocks to process on each rank - /// @param[out] nDims Number of data dimensions (i.e, 2 or 3) - /// @param[out] useDataSet VTKm partioned dataset to be used with the distributed contour tree filter - /// @param[out] globalSize Global extends of the input mesh (i.e., number of mesh points in each dimension) - /// @param[in] blocksPerDim Number of data blocks used in each data dimension - /// @param[in] localBlockIndices Array with the (x,y,z) index of each local data block with - /// with respect to blocksPerDim - /// @param[in] dataReadTime Time to read the data - /// @param[in] buildDatasetTime Time to construct the VTKm datasets - /// @returns bool indicating whether the read was successful or not - template - bool readPreSplitFiles(const int& rank, - const std::string& filename, - const int& blocksPerRank, - std::vector::size_type& nDims, - vtkm::cont::PartitionedDataSet& useDataSet, - vtkm::Id3& globalSize, - vtkm::Id3& blocksPerDim, - vtkm::cont::ArrayHandle& localBlockIndices, - vtkm::Float64& dataReadTime, - vtkm::Float64& buildDatasetTime) + if (blockNo == 0) + { // First block: Set globalSize + globalSize = + vtkm::Id3{ static_cast(global_extents[0]), + static_cast(global_extents[1]), + static_cast(global_extents.size() > 2 ? global_extents[2] : 1) }; + } + else + { // All other blocks: Consistency check of globalSize + if (globalSize != + vtkm::Id3{ static_cast(global_extents[0]), + static_cast(global_extents[1]), + static_cast(global_extents.size() > 2 ? 
global_extents[2] : 1) }) { - vtkm::cont::Timer totalTime; - totalTime.Start(); - vtkm::Float64 prevTime = 0; - vtkm::Float64 currTime = 0; + std::cerr << "Error: Global extents mismatch between blocks!" << std::endl; + return false; + } + } - localBlockIndices.Allocate(blocksPerRank); - auto localBlockIndicesPortal = localBlockIndices.WritePortal(); - for (int blockNo = 0; blockNo < blocksPerRank; ++blockNo) - { - // Translate pattern into filename for this block - char block_filename[256]; - snprintf(block_filename, - sizeof(block_filename), - filename.c_str(), - static_cast(rank * blocksPerRank + blockNo)); - std::cout << "Reading file " << block_filename << std::endl; + getline(inFile, line); + std::istringstream offset_stream(line); + offset_stream >> tag; + if (tag != "#OFFSET") + { + std::cerr << "Error: Expected #OFFSET, got " << tag << std::endl; + return false; + } + std::vector offset; + while (offset_stream >> dimVertices) + offset.push_back(dimVertices); + // Swap dimensions so that they are from fastest to slowest growing + // dims[0] -> col; dims[1] -> row, dims[2] ->slice + std::swap(offset[0], offset[1]); - // Open file - std::ifstream inFile(block_filename); - if (!inFile.is_open() || inFile.bad()) - { - std::cerr << "Error: Couldn't open file " << block_filename << std::endl; - return false; - } + getline(inFile, line); + std::istringstream bpd_stream(line); + bpd_stream >> tag; + if (tag != "#BLOCKS_PER_DIM") + { + std::cerr << "Error: Expected #BLOCKS_PER_DIM, got " << tag << std::endl; + return false; + } + std::vector bpd; + while (bpd_stream >> dimVertices) + bpd.push_back(dimVertices); + // Swap dimensions so that they are from fastest to slowest growing + // dims[0] -> col; dims[1] -> row, dims[2] ->slice + std::swap(bpd[0], bpd[1]); - // Read header with dimensions - std::string line; - std::string tag; - vtkm::Id dimVertices; + getline(inFile, line); + std::istringstream blockIndex_stream(line); + blockIndex_stream >> tag; + if (tag != 
"#BLOCK_INDEX") + { + std::cerr << "Error: Expected #BLOCK_INDEX, got " << tag << std::endl; + return false; + } + std::vector blockIndex; + while (blockIndex_stream >> dimVertices) + blockIndex.push_back(dimVertices); + // Swap dimensions so that they are from fastest to slowest growing + // dims[0] -> col; dims[1] -> row, dims[2] ->slice + std::swap(blockIndex[0], blockIndex[1]); - getline(inFile, line); - std::istringstream global_extents_stream(line); - global_extents_stream >> tag; - if (tag != "#GLOBAL_EXTENTS") - { - std::cerr << "Error: Expected #GLOBAL_EXTENTS, got " << tag << std::endl; - return false; - } + getline(inFile, line); + std::istringstream linestream(line); + std::vector dims; + while (linestream >> dimVertices) + { + dims.push_back(dimVertices); + } - std::vector global_extents; - while (global_extents_stream >> dimVertices) - global_extents.push_back(dimVertices); + if (dims.size() != global_extents.size() || dims.size() != offset.size()) + { + std::cerr << "Error: Dimension mismatch" << std::endl; + return false; + } + // Swap dimensions so that they are from fastest to slowest growing + // dims[0] -> col; dims[1] -> row, dims[2] ->slice + std::swap(dims[0], dims[1]); - // Swap dimensions so that they are from fastest to slowest growing - // dims[0] -> col; dims[1] -> row, dims[2] ->slice - std::swap(global_extents[0], global_extents[1]); + // Compute the number of vertices, i.e., xdim * ydim * zdim + nDims = static_cast(dims.size()); + std::size_t numVertices = static_cast( + std::accumulate(dims.begin(), dims.end(), std::size_t(1), std::multiplies())); - if (blockNo == 0) - { // First block: Set globalSize - globalSize = - vtkm::Id3{ static_cast(global_extents[0]), - static_cast(global_extents[1]), - static_cast(global_extents.size() > 2 ? 
global_extents[2] : 1) }; - } - else - { // All other blocks: Consistency check of globalSize - if (globalSize != - vtkm::Id3{ - static_cast(global_extents[0]), - static_cast(global_extents[1]), - static_cast(global_extents.size() > 2 ? global_extents[2] : 1) }) - { - std::cerr << "Error: Global extents mismatch between blocks!" << std::endl; - return false; - } - } + // Check for fatal input errors + // Check that the number of dimensiosn is either 2D or 3D + bool invalidNumDimensions = (nDims < 2 || nDims > 3); + // Log any errors if found on rank 0 + VTKM_LOG_IF_S(vtkm::cont::LogLevel::Error, + invalidNumDimensions && (rank == 0), + "The input mesh is " << nDims + << "D. " + "The input data must be either 2D or 3D."); - getline(inFile, line); - std::istringstream offset_stream(line); - offset_stream >> tag; - if (tag != "#OFFSET") - { - std::cerr << "Error: Expected #OFFSET, got " << tag << std::endl; - return false; - } - std::vector offset; - while (offset_stream >> dimVertices) - offset.push_back(dimVertices); - // Swap dimensions so that they are from fastest to slowest growing - // dims[0] -> col; dims[1] -> row, dims[2] ->slice - std::swap(offset[0], offset[1]); + // If we found any errors in the setttings than finalize MPI and exit the execution + if (invalidNumDimensions) + { + return false; + } - getline(inFile, line); - std::istringstream bpd_stream(line); - bpd_stream >> tag; - if (tag != "#BLOCKS_PER_DIM") - { - std::cerr << "Error: Expected #BLOCKS_PER_DIM, got " << tag << std::endl; - return false; - } - std::vector bpd; - while (bpd_stream >> dimVertices) - bpd.push_back(dimVertices); - // Swap dimensions so that they are from fastest to slowest growing - // dims[0] -> col; dims[1] -> row, dims[2] ->slice - std::swap(bpd[0], bpd[1]); + // Read data + std::vector values(numVertices); + if (filename.compare(filename.length() - 5, 5, ".bdem") == 0) + { + inFile.read(reinterpret_cast(values.data()), + static_cast(numVertices * sizeof(ValueType))); + } 
+ else + { + for (std::size_t vertex = 0; vertex < numVertices; ++vertex) + { + inFile >> values[vertex]; + } + } - getline(inFile, line); - std::istringstream blockIndex_stream(line); - blockIndex_stream >> tag; - if (tag != "#BLOCK_INDEX") - { - std::cerr << "Error: Expected #BLOCK_INDEX, got " << tag << std::endl; - return false; - } - std::vector blockIndex; - while (blockIndex_stream >> dimVertices) - blockIndex.push_back(dimVertices); - // Swap dimensions so that they are from fastest to slowest growing - // dims[0] -> col; dims[1] -> row, dims[2] ->slice - std::swap(blockIndex[0], blockIndex[1]); + currTime = totalTime.GetElapsedTime(); + dataReadTime = currTime - prevTime; + prevTime = currTime; - getline(inFile, line); - std::istringstream linestream(line); - std::vector dims; - while (linestream >> dimVertices) - { - dims.push_back(dimVertices); - } + // Create vtk-m data set + vtkm::cont::DataSetBuilderUniform dsb; + vtkm::cont::DataSet ds; + if (nDims == 2) + { + const vtkm::Id2 v_dims{ + static_cast(dims[0]), + static_cast(dims[1]), + }; + const vtkm::Vec v_origin{ static_cast(offset[0]), + static_cast(offset[1]) }; + const vtkm::Vec v_spacing{ 1, 1 }; + ds = dsb.Create(v_dims, v_origin, v_spacing); + vtkm::cont::CellSetStructured<2> cs; + cs.SetPointDimensions(v_dims); + cs.SetGlobalPointDimensions(vtkm::Id2{ globalSize[0], globalSize[1] }); + cs.SetGlobalPointIndexStart(vtkm::Id2{ offset[0], offset[1] }); + ds.SetCellSet(cs); + } + else + { + VTKM_ASSERT(nDims == 3); + const vtkm::Id3 v_dims{ static_cast(dims[0]), + static_cast(dims[1]), + static_cast(dims[2]) }; + const vtkm::Vec v_origin{ static_cast(offset[0]), + static_cast(offset[1]), + static_cast(offset[2]) }; + vtkm::Vec v_spacing(1, 1, 1); + ds = dsb.Create(v_dims, v_origin, v_spacing); + vtkm::cont::CellSetStructured<3> cs; + cs.SetPointDimensions(v_dims); + cs.SetGlobalPointDimensions(globalSize); + cs.SetGlobalPointIndexStart(vtkm::Id3{ offset[0], offset[1], offset[2] }); + 
ds.SetCellSet(cs); + } - if (dims.size() != global_extents.size() || dims.size() != offset.size()) - { - std::cerr << "Error: Dimension mismatch" << std::endl; - return false; - } - // Swap dimensions so that they are from fastest to slowest growing - // dims[0] -> col; dims[1] -> row, dims[2] ->slice - std::swap(dims[0], dims[1]); + ds.AddPointField("values", values); + // and add to partition + useDataSet.AppendPartition(ds); - // Compute the number of vertices, i.e., xdim * ydim * zdim - nDims = static_cast(dims.size()); - std::size_t numVertices = static_cast(std::accumulate( - dims.begin(), dims.end(), std::size_t(1), std::multiplies())); + localBlockIndicesPortal.Set(blockNo, + vtkm::Id3{ static_cast(blockIndex[0]), + static_cast(blockIndex[1]), + static_cast(nDims == 3 ? blockIndex[2] : 0) }); - // Check for fatal input errors - // Check that the number of dimensiosn is either 2D or 3D - bool invalidNumDimensions = (nDims < 2 || nDims > 3); - // Log any errors if found on rank 0 - VTKM_LOG_IF_S(vtkm::cont::LogLevel::Error, - invalidNumDimensions && (rank == 0), - "The input mesh is " << nDims - << "D. " - "The input data must be either 2D or 3D."); + if (blockNo == 0) + { + blocksPerDim = vtkm::Id3{ static_cast(bpd[0]), + static_cast(bpd[1]), + static_cast(nDims == 3 ? 
bpd[2] : 1) }; + } + } + currTime = totalTime.GetElapsedTime(); + buildDatasetTime = currTime - prevTime; + return true; +} - // If we found any errors in the setttings than finalize MPI and exit the execution - if (invalidNumDimensions) - { - return false; - } - // Read data - std::vector values(numVertices); - if (filename.compare(filename.length() - 5, 5, ".bdem") == 0) - { - inFile.read(reinterpret_cast(values.data()), - static_cast(numVertices * sizeof(ValueType))); - } - else - { - for (std::size_t vertex = 0; vertex < numVertices; ++vertex) - { - inFile >> values[vertex]; - } - } +/// Read data from a single file and split the data into blocks across ranks +/// This is a simple implementation that will read the full data on all ranks +/// and then extract only the relevant subblock for that rank. +/// The function support reading from BOV as well from ASCII files +/// +/// @param[in] rank The current MPI rank the function is called from +/// @param[in] size The number of MPI ranks +/// @param[in] filename Name of the file with %d as placeholder for the integer index of the block +/// @param[in] numBlocks Number of blocks to use during computation +/// @param[in] blocksPerRank Number of data blocks to process on each rank +/// @param[out] nDims Number of data dimensions (i.e, 2 or 3) +/// @param[out] useDataSet VTKm partioned dataset to be used with the distributed contour tree filter +/// @param[out] globalSize Global extends of the input mesh (i.e., number of mesh points in each dimension) +/// @param[in] blocksPerDim Number of data blocks used in each data dimension +/// @param[in] localBlockIndices Array with the (x,y,z) index of each local data block with +/// with respect to blocksPerDim +/// @param[in] dataReadTime Time to read the data +/// @param[in] buildDatasetTime Time to construct the VTKm datasets +/// @returns bool indicating whether the read was successful or not +template +bool readSingleBlockFile(const int& rank, + const int& size, + const 
std::string& filename, + const int& numBlocks, + const int& blocksPerRank, + std::vector::size_type& nDims, + vtkm::cont::PartitionedDataSet& useDataSet, + vtkm::Id3& globalSize, + vtkm::Id3& blocksPerDim, + vtkm::cont::ArrayHandle& localBlockIndices, + vtkm::Float64& dataReadTime, + vtkm::Float64& buildDatasetTime) +{ + vtkm::cont::Timer totalTime; + totalTime.Start(); + vtkm::Float64 prevTime = 0; + vtkm::Float64 currTime = 0; - currTime = totalTime.GetElapsedTime(); - dataReadTime = currTime - prevTime; - prevTime = currTime; + localBlockIndices.Allocate(blocksPerRank); + auto localBlockIndicesPortal = localBlockIndices.WritePortal(); - // Create vtk-m data set - vtkm::cont::DataSetBuilderUniform dsb; - vtkm::cont::DataSet ds; - if (nDims == 2) - { - const vtkm::Id2 v_dims{ - static_cast(dims[0]), - static_cast(dims[1]), - }; - const vtkm::Vec v_origin{ static_cast(offset[0]), - static_cast(offset[1]) }; - const vtkm::Vec v_spacing{ 1, 1 }; - ds = dsb.Create(v_dims, v_origin, v_spacing); - vtkm::cont::CellSetStructured<2> cs; - cs.SetPointDimensions(v_dims); - cs.SetGlobalPointDimensions(vtkm::Id2{ globalSize[0], globalSize[1] }); - cs.SetGlobalPointIndexStart(vtkm::Id2{ offset[0], offset[1] }); - ds.SetCellSet(cs); - } - else - { - VTKM_ASSERT(nDims == 3); - const vtkm::Id3 v_dims{ static_cast(dims[0]), - static_cast(dims[1]), - static_cast(dims[2]) }; - const vtkm::Vec v_origin{ static_cast(offset[0]), - static_cast(offset[1]), - static_cast(offset[2]) }; - vtkm::Vec v_spacing(1, 1, 1); - ds = dsb.Create(v_dims, v_origin, v_spacing); - vtkm::cont::CellSetStructured<3> cs; - cs.SetPointDimensions(v_dims); - cs.SetGlobalPointDimensions(globalSize); - cs.SetGlobalPointIndexStart(vtkm::Id3{ offset[0], offset[1], offset[2] }); - ds.SetCellSet(cs); - } + vtkm::cont::DataSet inDataSet; + // TODO: Currently FloatDefault would be fine, but it could cause problems if we ever read binary files here. 
+ std::vector values; + std::vector dims; - ds.AddPointField("values", values); - // and add to partition - useDataSet.AppendPartition(ds); + // Read BOV data file + if (filename.compare(filename.length() - 3, 3, "bov") == 0) + { + std::cout << "Reading BOV file" << std::endl; + vtkm::io::BOVDataSetReader reader(filename); + inDataSet = reader.ReadDataSet(); + nDims = 3; + currTime = totalTime.GetElapsedTime(); + dataReadTime = currTime - prevTime; + prevTime = currTime; + // Copy the data into the values array so we can construct a multiblock dataset + vtkm::Id3 pointDimensions; + auto cellSet = inDataSet.GetCellSet(); + vtkm::cont::CastAndCall( + cellSet, vtkm::worklet::contourtree_augmented::GetPointDimensions(), pointDimensions); + std::cout << "Point dimensions are " << pointDimensions << std::endl; + dims.resize(3); + dims[0] = pointDimensions[0]; + dims[1] = pointDimensions[1]; + dims[2] = pointDimensions[2]; + auto tempFieldData = inDataSet.GetField(0).GetData(); + values.resize(static_cast(tempFieldData.GetNumberOfValues())); + auto valuesHandle = vtkm::cont::make_ArrayHandle(values, vtkm::CopyFlag::Off); + vtkm::cont::ArrayCopy(tempFieldData, valuesHandle); + valuesHandle.SyncControlArray(); //Forces values to get updated if copy happened on GPU + } + // Read ASCII data input + else + { + std::cout << "Reading ASCII file" << std::endl; + std::ifstream inFile(filename); + if (inFile.bad()) + return 0; - localBlockIndicesPortal.Set( - blockNo, - vtkm::Id3{ static_cast(blockIndex[0]), - static_cast(blockIndex[1]), - static_cast(nDims == 3 ? blockIndex[2] : 0) }); + // Read the dimensions of the mesh, i.e,. 
number of elementes in x, y, and z + std::string line; + getline(inFile, line); + std::istringstream linestream(line); + vtkm::Id dimVertices; + while (linestream >> dimVertices) + { + dims.push_back(dimVertices); + } + // Swap dimensions so that they are from fastest to slowest growing + // dims[0] -> col; dims[1] -> row, dims[2] ->slice + std::swap(dims[0], dims[1]); - if (blockNo == 0) - { - blocksPerDim = vtkm::Id3{ static_cast(bpd[0]), - static_cast(bpd[1]), - static_cast(nDims == 3 ? bpd[2] : 1) }; - } - } - currTime = totalTime.GetElapsedTime(); - buildDatasetTime = currTime - prevTime; - return true; + // Compute the number of vertices, i.e., xdim * ydim * zdim + nDims = static_cast(dims.size()); + std::size_t numVertices = static_cast( + std::accumulate(dims.begin(), dims.end(), std::size_t(1), std::multiplies())); + + // Check the the number of dimensiosn is either 2D or 3D + bool invalidNumDimensions = (nDims < 2 || nDims > 3); + // Log any errors if found on rank 0 + VTKM_LOG_IF_S(vtkm::cont::LogLevel::Error, + invalidNumDimensions && (rank == 0), + "The input mesh is " << nDims << "D. The input data must be either 2D or 3D."); + // If we found any errors in the setttings than finalize MPI and exit the execution + if (invalidNumDimensions) + { + return false; + } + + // Read data + values.resize(numVertices); + for (std::size_t vertex = 0; vertex < numVertices; ++vertex) + { + inFile >> values[vertex]; + } + + // finish reading the data + inFile.close(); + + currTime = totalTime.GetElapsedTime(); + dataReadTime = currTime - prevTime; + prevTime = currTime; + + } // END ASCII Read + + // Create a multi-block dataset for multi-block DIY-paralle processing + blocksPerDim = + nDims == 3 ? vtkm::Id3(1, 1, numBlocks) : vtkm::Id3(1, numBlocks, 1); // Decompose the data into + globalSize = nDims == 3 ? 
vtkm::Id3(static_cast(dims[0]), + static_cast(dims[1]), + static_cast(dims[2])) + : vtkm::Id3(static_cast(dims[0]), + static_cast(dims[1]), + static_cast(1)); + std::cout << blocksPerDim << " " << globalSize << std::endl; + { + vtkm::Id lastDimSize = + (nDims == 2) ? static_cast(dims[1]) : static_cast(dims[2]); + if (size > (lastDimSize / 2.)) + { + VTKM_LOG_IF_S(vtkm::cont::LogLevel::Error, + rank == 0, + "Number of ranks too large for data. Use " << lastDimSize / 2 + << "or fewer ranks"); + return false; + } + vtkm::Id standardBlockSize = (vtkm::Id)(lastDimSize / numBlocks); + vtkm::Id blockSize = standardBlockSize; + vtkm::Id blockSliceSize = + nDims == 2 ? static_cast(dims[0]) : static_cast((dims[0] * dims[1])); + vtkm::Id blockNumValues = blockSize * blockSliceSize; + + vtkm::Id startBlock = blocksPerRank * rank; + vtkm::Id endBlock = startBlock + blocksPerRank; + for (vtkm::Id blockIndex = startBlock; blockIndex < endBlock; ++blockIndex) + { + vtkm::Id localBlockIndex = blockIndex - startBlock; + vtkm::Id blockStart = blockIndex * blockNumValues; + vtkm::Id blockEnd = blockStart + blockNumValues; + if (blockIndex < (numBlocks - 1)) // add overlap between regions + { + blockEnd += blockSliceSize; + } + else + { + blockEnd = lastDimSize * blockSliceSize; + } + vtkm::Id currBlockSize = (vtkm::Id)((blockEnd - blockStart) / blockSliceSize); + + vtkm::cont::DataSetBuilderUniform dsb; + vtkm::cont::DataSet ds; + + // 2D data + if (nDims == 2) + { + vtkm::Id2 vdims; + vdims[0] = static_cast(dims[0]); + vdims[1] = static_cast(currBlockSize); + vtkm::Vec origin(0, blockIndex * blockSize); + vtkm::Vec spacing(1, 1); + ds = dsb.Create(vdims, origin, spacing); + vtkm::cont::CellSetStructured<2> cs; + cs.SetPointDimensions(vdims); + cs.SetGlobalPointDimensions(vtkm::Id2{ globalSize[0], globalSize[1] }); + cs.SetGlobalPointIndexStart(vtkm::Id2{ 0, (blockStart / blockSliceSize) }); + ds.SetCellSet(cs); + localBlockIndicesPortal.Set(localBlockIndex, vtkm::Id3(0, blockIndex, 
0)); + } + // 3D data + else + { + vtkm::Id3 vdims; + vdims[0] = static_cast(dims[0]); + vdims[1] = static_cast(dims[1]); + vdims[2] = static_cast(currBlockSize); + vtkm::Vec origin(0, 0, (blockIndex * blockSize)); + vtkm::Vec spacing(1, 1, 1); + ds = dsb.Create(vdims, origin, spacing); + vtkm::cont::CellSetStructured<3> cs; + cs.SetPointDimensions(vdims); + cs.SetGlobalPointDimensions(globalSize); + cs.SetGlobalPointIndexStart(vtkm::Id3(0, 0, blockStart / blockSliceSize)); + ds.SetCellSet(cs); + localBlockIndicesPortal.Set(localBlockIndex, vtkm::Id3(0, 0, blockIndex)); } + std::vector subValues((values.begin() + blockStart), + (values.begin() + blockEnd)); - /// Read data from a single file and split the data into blocks across ranks - /// This is a simple implementation that will read the full data on all ranks - /// and then extract only the relevant subblock for that rank. - /// The function support reading from BOV as well from ASCII files - /// - /// @param[in] rank The current MPI rank the function is called from - /// @param[in] size The number of MPI ranks - /// @param[in] filename Name of the file with %d as placeholder for the integer index of the block - /// @param[in] numBlocks Number of blocks to use during computation - /// @param[in] blocksPerRank Number of data blocks to process on each rank - /// @param[out] nDims Number of data dimensions (i.e, 2 or 3) - /// @param[out] useDataSet VTKm partioned dataset to be used with the distributed contour tree filter - /// @param[out] globalSize Global extends of the input mesh (i.e., number of mesh points in each dimension) - /// @param[in] blocksPerDim Number of data blocks used in each data dimension - /// @param[in] localBlockIndices Array with the (x,y,z) index of each local data block with - /// with respect to blocksPerDim - /// @param[in] dataReadTime Time to read the data - /// @param[in] buildDatasetTime Time to construct the VTKm datasets - /// @returns bool indicating whether the read was 
successful or not - template - bool readSingleBlockFile(const int& rank, - const int& size, - const std::string& filename, - const int& numBlocks, - const int& blocksPerRank, - std::vector::size_type& nDims, - vtkm::cont::PartitionedDataSet& useDataSet, - vtkm::Id3& globalSize, - vtkm::Id3& blocksPerDim, - vtkm::cont::ArrayHandle& localBlockIndices, - vtkm::Float64& dataReadTime, - vtkm::Float64& buildDatasetTime) - { - vtkm::cont::Timer totalTime; - totalTime.Start(); - vtkm::Float64 prevTime = 0; - vtkm::Float64 currTime = 0; - - localBlockIndices.Allocate(blocksPerRank); - auto localBlockIndicesPortal = localBlockIndices.WritePortal(); - - vtkm::cont::DataSet inDataSet; - // TODO: Currently FloatDefault would be fine, but it could cause problems if we ever read binary files here. - std::vector values; - std::vector dims; - - // Read BOV data file - if (filename.compare(filename.length() - 3, 3, "bov") == 0) - { - std::cout << "Reading BOV file" << std::endl; - vtkm::io::BOVDataSetReader reader(filename); - inDataSet = reader.ReadDataSet(); - nDims = 3; - currTime = totalTime.GetElapsedTime(); - dataReadTime = currTime - prevTime; - prevTime = currTime; - // Copy the data into the values array so we can construct a multiblock dataset - vtkm::Id3 pointDimensions; - auto cellSet = inDataSet.GetCellSet(); - vtkm::cont::CastAndCall( - cellSet, vtkm::worklet::contourtree_augmented::GetPointDimensions(), pointDimensions); - std::cout << "Point dimensions are " << pointDimensions << std::endl; - dims.resize(3); - dims[0] = pointDimensions[0]; - dims[1] = pointDimensions[1]; - dims[2] = pointDimensions[2]; - auto tempFieldData = inDataSet.GetField(0).GetData(); - values.resize(static_cast(tempFieldData.GetNumberOfValues())); - auto valuesHandle = vtkm::cont::make_ArrayHandle(values, vtkm::CopyFlag::Off); - vtkm::cont::ArrayCopy(tempFieldData, valuesHandle); - valuesHandle.SyncControlArray(); //Forces values to get updated if copy happened on GPU - } - // Read ASCII data 
input - else - { - std::cout << "Reading ASCII file" << std::endl; - std::ifstream inFile(filename); - if (inFile.bad()) - return 0; - - // Read the dimensions of the mesh, i.e,. number of elementes in x, y, and z - std::string line; - getline(inFile, line); - std::istringstream linestream(line); - vtkm::Id dimVertices; - while (linestream >> dimVertices) - { - dims.push_back(dimVertices); - } - // Swap dimensions so that they are from fastest to slowest growing - // dims[0] -> col; dims[1] -> row, dims[2] ->slice - std::swap(dims[0], dims[1]); - - // Compute the number of vertices, i.e., xdim * ydim * zdim - nDims = static_cast(dims.size()); - std::size_t numVertices = static_cast(std::accumulate( - dims.begin(), dims.end(), std::size_t(1), std::multiplies())); - - // Check the the number of dimensiosn is either 2D or 3D - bool invalidNumDimensions = (nDims < 2 || nDims > 3); - // Log any errors if found on rank 0 - VTKM_LOG_IF_S(vtkm::cont::LogLevel::Error, - invalidNumDimensions && (rank == 0), - "The input mesh is " << nDims - << "D. The input data must be either 2D or 3D."); - // If we found any errors in the setttings than finalize MPI and exit the execution - if (invalidNumDimensions) - { - return false; - } - - // Read data - values.resize(numVertices); - for (std::size_t vertex = 0; vertex < numVertices; ++vertex) - { - inFile >> values[vertex]; - } - - // finish reading the data - inFile.close(); - - currTime = totalTime.GetElapsedTime(); - dataReadTime = currTime - prevTime; - prevTime = currTime; - - } // END ASCII Read - - // Create a multi-block dataset for multi-block DIY-paralle processing - blocksPerDim = nDims == 3 ? vtkm::Id3(1, 1, numBlocks) - : vtkm::Id3(1, numBlocks, 1); // Decompose the data into - globalSize = nDims == 3 ? 
vtkm::Id3(static_cast(dims[0]), - static_cast(dims[1]), - static_cast(dims[2])) - : vtkm::Id3(static_cast(dims[0]), - static_cast(dims[1]), - static_cast(1)); - std::cout << blocksPerDim << " " << globalSize << std::endl; - { - vtkm::Id lastDimSize = - (nDims == 2) ? static_cast(dims[1]) : static_cast(dims[2]); - if (size > (lastDimSize / 2.)) - { - VTKM_LOG_IF_S(vtkm::cont::LogLevel::Error, - rank == 0, - "Number of ranks too large for data. Use " << lastDimSize / 2 - << "or fewer ranks"); - return false; - } - vtkm::Id standardBlockSize = (vtkm::Id)(lastDimSize / numBlocks); - vtkm::Id blockSize = standardBlockSize; - vtkm::Id blockSliceSize = nDims == 2 ? static_cast(dims[0]) - : static_cast((dims[0] * dims[1])); - vtkm::Id blockNumValues = blockSize * blockSliceSize; - - vtkm::Id startBlock = blocksPerRank * rank; - vtkm::Id endBlock = startBlock + blocksPerRank; - for (vtkm::Id blockIndex = startBlock; blockIndex < endBlock; ++blockIndex) - { - vtkm::Id localBlockIndex = blockIndex - startBlock; - vtkm::Id blockStart = blockIndex * blockNumValues; - vtkm::Id blockEnd = blockStart + blockNumValues; - if (blockIndex < (numBlocks - 1)) // add overlap between regions - { - blockEnd += blockSliceSize; - } - else - { - blockEnd = lastDimSize * blockSliceSize; - } - vtkm::Id currBlockSize = (vtkm::Id)((blockEnd - blockStart) / blockSliceSize); - - vtkm::cont::DataSetBuilderUniform dsb; - vtkm::cont::DataSet ds; - - // 2D data - if (nDims == 2) - { - vtkm::Id2 vdims; - vdims[0] = static_cast(dims[0]); - vdims[1] = static_cast(currBlockSize); - vtkm::Vec origin(0, blockIndex * blockSize); - vtkm::Vec spacing(1, 1); - ds = dsb.Create(vdims, origin, spacing); - vtkm::cont::CellSetStructured<2> cs; - cs.SetPointDimensions(vdims); - cs.SetGlobalPointDimensions(vtkm::Id2{ globalSize[0], globalSize[1] }); - cs.SetGlobalPointIndexStart(vtkm::Id2{ 0, (blockStart / blockSliceSize) }); - ds.SetCellSet(cs); - localBlockIndicesPortal.Set(localBlockIndex, vtkm::Id3(0, blockIndex, 
0)); - } - // 3D data - else - { - vtkm::Id3 vdims; - vdims[0] = static_cast(dims[0]); - vdims[1] = static_cast(dims[1]); - vdims[2] = static_cast(currBlockSize); - vtkm::Vec origin(0, 0, (blockIndex * blockSize)); - vtkm::Vec spacing(1, 1, 1); - ds = dsb.Create(vdims, origin, spacing); - vtkm::cont::CellSetStructured<3> cs; - cs.SetPointDimensions(vdims); - cs.SetGlobalPointDimensions(globalSize); - cs.SetGlobalPointIndexStart(vtkm::Id3(0, 0, blockStart / blockSliceSize)); - ds.SetCellSet(cs); - localBlockIndicesPortal.Set(localBlockIndex, vtkm::Id3(0, 0, blockIndex)); - } - - std::vector subValues((values.begin() + blockStart), - (values.begin() + blockEnd)); - - ds.AddPointField("values", subValues); - useDataSet.AppendPartition(ds); - } - } - currTime = totalTime.GetElapsedTime(); - buildDatasetTime = currTime - prevTime; - return true; - } + ds.AddPointField("values", subValues); + useDataSet.AppendPartition(ds); + } + } + currTime = totalTime.GetElapsedTime(); + buildDatasetTime = currTime - prevTime; + return true; +} #endif From 0e5aeb10fe6a8efba0873141784db6a39586374a Mon Sep 17 00:00:00 2001 From: oruebel Date: Mon, 16 Jan 2023 18:08:58 -0800 Subject: [PATCH 14/17] Update contour_tree_distributed/CMakeLists.txt --- examples/contour_tree_distributed/CMakeLists.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/contour_tree_distributed/CMakeLists.txt b/examples/contour_tree_distributed/CMakeLists.txt index 418e9d5d5..ac605eeed 100644 --- a/examples/contour_tree_distributed/CMakeLists.txt +++ b/examples/contour_tree_distributed/CMakeLists.txt @@ -59,7 +59,7 @@ find_package(VTKm REQUIRED QUIET) # MPI #################################### if (VTKm_ENABLE_MPI AND TARGET vtkm::filter_scalar_topology AND TARGET vtkm::io) - add_executable(ContourTree_Distributed ContourTreeApp.cxx, ContourTreeAppDataIO.h) + add_executable(ContourTree_Distributed ContourTreeApp.cxx ContourTreeAppDataIO.h) target_link_libraries(ContourTree_Distributed 
vtkm::filter_scalar_topology vtkm::io MPI::MPI_CXX) vtkm_add_target_information(ContourTree_Distributed MODIFY_CUDA_FLAGS @@ -87,7 +87,7 @@ if (VTKm_ENABLE_MPI AND TARGET vtkm::filter_scalar_topology AND TARGET vtkm::io) vtkm_add_target_information(TreeCompiler DROP_UNUSED_SYMBOLS) add_executable(BranchCompiler BranchCompilerApp.cxx) - target_link_libraries(BranchCompiler vtkm::filter) + target_link_libraries(BranchCompiler vtkm::filter_scalar_topology) vtkm_add_target_information(BranchCompiler DROP_UNUSED_SYMBOLS) configure_file(split_data_2d.py split_data_2d.py COPYONLY) From 27d3d403f6d30ea69881f49cf054fcae9e0f9ac1 Mon Sep 17 00:00:00 2001 From: oruebel Date: Mon, 16 Jan 2023 18:25:17 -0800 Subject: [PATCH 15/17] Remove bad import in streamline_mpi example --- examples/streamline_mpi/StreamlineMPI.cxx | 1 - 1 file changed, 1 deletion(-) diff --git a/examples/streamline_mpi/StreamlineMPI.cxx b/examples/streamline_mpi/StreamlineMPI.cxx index aec518cec..1de54fd24 100644 --- a/examples/streamline_mpi/StreamlineMPI.cxx +++ b/examples/streamline_mpi/StreamlineMPI.cxx @@ -18,7 +18,6 @@ #include #include #include -#include #include #include From 113e6be327e94f0fa65779058ce899fd05339d1e Mon Sep 17 00:00:00 2001 From: oruebel Date: Mon, 16 Jan 2023 18:25:48 -0800 Subject: [PATCH 16/17] Remove bad import in ContourTreeApp distributed --- examples/contour_tree_distributed/ContourTreeApp.cxx | 1 - 1 file changed, 1 deletion(-) diff --git a/examples/contour_tree_distributed/ContourTreeApp.cxx b/examples/contour_tree_distributed/ContourTreeApp.cxx index 8d375a295..7b836a135 100644 --- a/examples/contour_tree_distributed/ContourTreeApp.cxx +++ b/examples/contour_tree_distributed/ContourTreeApp.cxx @@ -64,7 +64,6 @@ #include #include #include -#include #include #include #include From 44c276174002cea36813c82eabad59c108e54c81 Mon Sep 17 00:00:00 2001 From: oruebel Date: Mon, 16 Jan 2023 18:26:32 -0800 Subject: [PATCH 17/17] Remove bad import and fix warning in 
ContourTreeAppDataIO.h --- .../ContourTreeAppDataIO.h | 35 +++++++++---------- 1 file changed, 17 insertions(+), 18 deletions(-) diff --git a/examples/contour_tree_distributed/ContourTreeAppDataIO.h b/examples/contour_tree_distributed/ContourTreeAppDataIO.h index badbe61d1..acbf32b1c 100644 --- a/examples/contour_tree_distributed/ContourTreeAppDataIO.h +++ b/examples/contour_tree_distributed/ContourTreeAppDataIO.h @@ -61,7 +61,6 @@ #include #include #include -#include #include #include #include @@ -222,24 +221,24 @@ bool read3DHDF5File(const int& mpi_rank, count[2] = count[2] + 1; } // Check that we are not running over the end of the dataset - if (offset[0] + count[0] > globalSize[0]) + if (vtkm::Id(offset[0] + count[0]) > globalSize[0]) { count[0] = globalSize[0] - offset[0]; } - if (offset[1] + count[1] > globalSize[1]) + if (vtkm::Id(offset[1] + count[1]) > globalSize[1]) { count[1] = globalSize[1] - offset[1]; } - if (offset[2] + count[2] > globalSize[2]) + if (vtkm::Id(offset[2] + count[2]) > globalSize[2]) { count[2] = globalSize[2] - offset[2]; } blockSize = vtkm::Id3{ static_cast(count[0]), static_cast(count[1]), static_cast(count[2]) }; - vtkm::Id3 blockOrigin = vtkm::Id3{ static_cast(offset[0]), + /*vtkm::Id3 blockOrigin = vtkm::Id3{ static_cast(offset[0]), static_cast(offset[1]), - static_cast(offset[2]) }; + static_cast(offset[2]) };*/ // Define the hyperslap to read the data into memory status = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, offset, NULL, count, NULL); @@ -254,11 +253,11 @@ bool read3DHDF5File(const int& mpi_rank, double data_out[count[0]][count[1]][count[2]]; // output buffer status = H5Dread(dataset, H5T_NATIVE_DOUBLE, memspace, dataspace, H5P_DEFAULT, data_out); // Copy data to 1D array of the expected ValueType - for (vtkm::Id k = 0; k < count[0]; k++) + for (hsize_t k = 0; k < count[0]; k++) { - for (vtkm::Id j = 0; j < count[1]; j++) + for (hsize_t j = 0; j < count[1]; j++) { - for (vtkm::Id i = 0; i < count[2]; i++) + for 
(hsize_t i = 0; i < count[2]; i++) { values[to1DIndex(vtkm::Id3(k, j, i), blockSize)] = ValueType(data_out[k][j][i]); } @@ -270,11 +269,11 @@ bool read3DHDF5File(const int& mpi_rank, float data_out[count[0]][count[1]][count[2]]; // output buffer status = H5Dread(dataset, H5T_NATIVE_FLOAT, memspace, dataspace, H5P_DEFAULT, data_out); // Copy data to 1D array of the expected ValueType - for (vtkm::Id k = 0; k < count[0]; k++) + for (hsize_t k = 0; k < count[0]; k++) { - for (vtkm::Id j = 0; j < count[1]; j++) + for (hsize_t j = 0; j < count[1]; j++) { - for (vtkm::Id i = 0; i < count[2]; i++) + for (hsize_t i = 0; i < count[2]; i++) { values[to1DIndex(vtkm::Id3(k, j, i), blockSize)] = ValueType(data_out[k][j][i]); } @@ -286,11 +285,11 @@ bool read3DHDF5File(const int& mpi_rank, int data_out[count[0]][count[1]][count[2]]; // output buffer status = H5Dread(dataset, H5T_NATIVE_INT, memspace, dataspace, H5P_DEFAULT, data_out); // Copy data to 1D array of the expected ValueType - for (vtkm::Id k = 0; k < count[0]; k++) + for (hsize_t k = 0; k < count[0]; k++) { - for (vtkm::Id j = 0; j < count[1]; j++) + for (hsize_t j = 0; j < count[1]; j++) { - for (vtkm::Id i = 0; i < count[2]; i++) + for (hsize_t i = 0; i < count[2]; i++) { values[to1DIndex(vtkm::Id3(k, j, i), blockSize)] = ValueType(data_out[k][j][i]); } @@ -302,11 +301,11 @@ bool read3DHDF5File(const int& mpi_rank, unsigned char data_out[count[0]][count[1]][count[2]]; // output buffer status = H5Dread(dataset, H5T_NATIVE_UCHAR, memspace, dataspace, H5P_DEFAULT, data_out); // Copy data to 1D array of the expected ValueType - for (vtkm::Id k = 0; k < count[0]; k++) + for (hsize_t k = 0; k < count[0]; k++) { - for (vtkm::Id j = 0; j < count[1]; j++) + for (hsize_t j = 0; j < count[1]; j++) { - for (vtkm::Id i = 0; i < count[2]; i++) + for (hsize_t i = 0; i < count[2]; i++) { values[to1DIndex(vtkm::Id3(k, j, i), blockSize)] = ValueType(data_out[k][j][i]); }