From e9fcd7ca782a7f9dd7b42d42fcf0085a8ae2488c Mon Sep 17 00:00:00 2001 From: oruebel Date: Mon, 13 Dec 2021 03:35:04 -0800 Subject: [PATCH 01/29] Moved ContourTree I/O to new module and added HDF5 I/O --- .../contour_tree_distributed/CMakeLists.txt | 5 +- .../ContourTreeApp.cxx | 564 ++++-------- .../ContourTreeAppDataIO.h | 806 ++++++++++++++++++ 3 files changed, 954 insertions(+), 421 deletions(-) create mode 100644 examples/contour_tree_distributed/ContourTreeAppDataIO.h diff --git a/examples/contour_tree_distributed/CMakeLists.txt b/examples/contour_tree_distributed/CMakeLists.txt index 6c146ac3a..57997fdb6 100644 --- a/examples/contour_tree_distributed/CMakeLists.txt +++ b/examples/contour_tree_distributed/CMakeLists.txt @@ -59,7 +59,7 @@ find_package(VTKm REQUIRED QUIET) # MPI #################################### if (VTKm_ENABLE_MPI) - add_executable(ContourTree_Distributed ContourTreeApp.cxx) + add_executable(ContourTree_Distributed ContourTreeApp.cxx ContourTreeAppDataIO.h) target_link_libraries(ContourTree_Distributed vtkm_filter vtkm_io MPI::MPI_CXX) vtkm_add_target_information(ContourTree_Distributed MODIFY_CUDA_FLAGS @@ -76,6 +76,9 @@ if (VTKm_ENABLE_MPI) if (TARGET vtkm::tbb) target_compile_definitions(ContourTree_Distributed PRIVATE "ENABLE_SET_NUM_THREADS") endif() + if (VTKm_ENABLE_HDF5_IO) + target_compile_definitions(ContourTree_Distributed PRIVATE "ENABLE_HDFIO") + endif () add_executable(TreeCompiler TreeCompilerApp.cxx) target_link_libraries(TreeCompiler vtkm_filter) diff --git a/examples/contour_tree_distributed/ContourTreeApp.cxx b/examples/contour_tree_distributed/ContourTreeApp.cxx index b857a77cd..d51e6f110 100644 --- a/examples/contour_tree_distributed/ContourTreeApp.cxx +++ b/examples/contour_tree_distributed/ContourTreeApp.cxx @@ -71,6 +71,8 @@ #include #include +#include "ContourTreeAppDataIO.h" + #include #include #include @@ -272,6 +274,48 @@ int main(int argc, char* argv[]) } } +#ifdef ENABLE_HDFIO + std::string dataset_name = 
"data"; + if (parser.hasOption("--dataset")) + { + dataset_name = parser.getOption("--dataset"); + } + + vtkm::Id3 blocksPerDimIn(1, 1, size); + if (parser.hasOption("--blocksPerDim")) + { + std::string temp = parser.getOption("--blocksPerDim"); + if (std::count(temp.begin(), temp.end(), ',') != 2) + { + std::cerr << "Invalid --blocksPerDim option. Expected string of the form 'x,y,z' got" << temp + << std::endl; + MPI_Finalize(); + return EXIT_FAILURE; + } + char* tempC = (char*)temp.c_str(); + blocksPerDimIn[0] = std::stoi(std::strtok(tempC, ",")); + blocksPerDimIn[1] = std::stoi(std::strtok(nullptr, ",")); + blocksPerDimIn[2] = std::stoi(std::strtok(nullptr, ",")); + } + + vtkm::Id3 selectSize(-1, -1, -1); + if (parser.hasOption("--selectSize")) + { + std::string temp = parser.getOption("--selectSize"); + if (std::count(temp.begin(), temp.end(), ',') != 2) + { + std::cerr << "Invalid --selectSize option. Expected string of the form 'x,y,z' got" << temp + << std::endl; + MPI_Finalize(); + return EXIT_FAILURE; + } + char* tempC = (char*)temp.c_str(); + selectSize[0] = std::stoi(std::strtok(tempC, ",")); + selectSize[1] = std::stoi(std::strtok(nullptr, ",")); + selectSize[2] = std::stoi(std::strtok(nullptr, ",")); + } +#endif + if (argc < 2 || parser.hasOption("--help") || parser.hasOption("-h")) { if (rank == 0) @@ -307,11 +351,24 @@ int main(int argc, char* argv[]) << " computation (Default=False). " << std::endl; std::cout << "--saveOutputData Save data files with hierarchical tree or volume data" << std::endl; - std::cout << "--numBlocks Number of blocks to use during computation " + std::cout << "--numBlocks Number of blocks to use during computation. (Sngle block " + "ASCII/BOV file reader only)" << "(Default=number of MPI ranks.)" << std::endl; std::cout << "--forwardSummary Forward the summary timings also to the per-rank " << std::endl << " log files. 
Default is to round-robin print the " << std::endl << " summary instead" << std::endl; +#ifdef ENABLE_HDFIO + std::cout << "--dataset Name of the dataset to load (HDF5 reader only)(Default=data)" + << std::endl; + std::cout << "--blocksPerDim Number of blocks to split the data into. This is a string of " + "the form 'x,y,z'." + "(HDF5 reader only)(Default='1,1,#ranks')" + << std::endl; + std::cout + << "--selectSize Size of the subblock to read. This is a string of the form 'x,y,z'." + "(HDF5 reader only)(Default='-1,-1,-1')" + << std::endl; +#endif std::cout << std::endl; } MPI_Finalize(); @@ -334,7 +391,15 @@ int main(int argc, char* argv[]) << computeHierarchicalVolumetricBranchDecomposition << std::endl << " saveOutputData=" << saveOutputData << std::endl << " forwardSummary=" << forwardSummary << std::endl - << " nblocks=" << numBlocks << std::endl); + << " nblocks=" << numBlocks << std::endl +#ifdef ENABLE_HDFIO + << " dataset=" << dataset_name << std::endl + << " blocksPerDim=" << blocksPerDimIn[0] << "," << blocksPerDimIn[1] << "," + << blocksPerDimIn[2] << std::endl + << " selectSize=" << selectSize[0] << "," << selectSize[1] << "," + << selectSize[2] << std::endl +#endif + ); } // Redirect stdout to file if we are using MPI with Debugging @@ -411,430 +476,90 @@ int main(int argc, char* argv[]) auto localBlockSizesPortal = localBlockSizes.WritePortal(); // Read the pre-split data files + bool readOk = true; if (preSplitFiles) { - for (int blockNo = 0; blockNo < blocksPerRank; ++blockNo) - { - // Translate pattern into filename for this block - char block_filename[256]; - snprintf(block_filename, - sizeof(block_filename), - filename.c_str(), - static_cast(rank * blocksPerRank + blockNo)); - std::cout << "Reading file " << block_filename << std::endl; - - // Open file - std::ifstream inFile(block_filename); - if (!inFile.is_open() || inFile.bad()) - { - std::cerr << "Error: Couldn't open file " << block_filename << std::endl; - MPI_Finalize(); - return 
EXIT_FAILURE; - } - - // Read header with dimensions - std::string line; - std::string tag; - vtkm::Id dimVertices; - - getline(inFile, line); - std::istringstream global_extents_stream(line); - global_extents_stream >> tag; - if (tag != "#GLOBAL_EXTENTS") - { - std::cerr << "Error: Expected #GLOBAL_EXTENTS, got " << tag << std::endl; - MPI_Finalize(); - return EXIT_FAILURE; - } - - std::vector global_extents; - while (global_extents_stream >> dimVertices) - global_extents.push_back(dimVertices); - - // Swap dimensions so that they are from fastest to slowest growing - // dims[0] -> col; dims[1] -> row, dims[2] ->slice - std::swap(global_extents[0], global_extents[1]); - - if (blockNo == 0) - { // First block: Set globalSize - globalSize = - vtkm::Id3{ static_cast(global_extents[0]), - static_cast(global_extents[1]), - static_cast(global_extents.size() > 2 ? global_extents[2] : 1) }; - } - else - { // All other blocks: Consistency check of globalSize - if (globalSize != - vtkm::Id3{ static_cast(global_extents[0]), - static_cast(global_extents[1]), - static_cast(global_extents.size() > 2 ? global_extents[2] : 1) }) - { - std::cerr << "Error: Global extents mismatch between blocks!" 
<< std::endl; - MPI_Finalize(); - return EXIT_FAILURE; - } - } - - getline(inFile, line); - std::istringstream offset_stream(line); - offset_stream >> tag; - if (tag != "#OFFSET") - { - std::cerr << "Error: Expected #OFFSET, got " << tag << std::endl; - MPI_Finalize(); - return EXIT_FAILURE; - } - std::vector offset; - while (offset_stream >> dimVertices) - offset.push_back(dimVertices); - // Swap dimensions so that they are from fastest to slowest growing - // dims[0] -> col; dims[1] -> row, dims[2] ->slice - std::swap(offset[0], offset[1]); - - getline(inFile, line); - std::istringstream bpd_stream(line); - bpd_stream >> tag; - if (tag != "#BLOCKS_PER_DIM") - { - std::cerr << "Error: Expected #BLOCKS_PER_DIM, got " << tag << std::endl; - MPI_Finalize(); - return EXIT_FAILURE; - } - std::vector bpd; - while (bpd_stream >> dimVertices) - bpd.push_back(dimVertices); - // Swap dimensions so that they are from fastest to slowest growing - // dims[0] -> col; dims[1] -> row, dims[2] ->slice - std::swap(bpd[0], bpd[1]); - - getline(inFile, line); - std::istringstream blockIndex_stream(line); - blockIndex_stream >> tag; - if (tag != "#BLOCK_INDEX") - { - std::cerr << "Error: Expected #BLOCK_INDEX, got " << tag << std::endl; - MPI_Finalize(); - return EXIT_FAILURE; - } - std::vector blockIndex; - while (blockIndex_stream >> dimVertices) - blockIndex.push_back(dimVertices); - // Swap dimensions so that they are from fastest to slowest growing - // dims[0] -> col; dims[1] -> row, dims[2] ->slice - std::swap(blockIndex[0], blockIndex[1]); - - getline(inFile, line); - std::istringstream linestream(line); - std::vector dims; - while (linestream >> dimVertices) - { - dims.push_back(dimVertices); - } - - if (dims.size() != global_extents.size() || dims.size() != offset.size()) - { - std::cerr << "Error: Dimension mismatch" << std::endl; - MPI_Finalize(); - return EXIT_FAILURE; - } - // Swap dimensions so that they are from fastest to slowest growing - // dims[0] -> col; dims[1] 
-> row, dims[2] ->slice - std::swap(dims[0], dims[1]); - - // Compute the number of vertices, i.e., xdim * ydim * zdim - nDims = static_cast(dims.size()); - std::size_t numVertices = static_cast( - std::accumulate(dims.begin(), dims.end(), std::size_t(1), std::multiplies())); - - // Check for fatal input errors - // Check that the number of dimensiosn is either 2D or 3D - bool invalidNumDimensions = (nDims < 2 || nDims > 3); - // Log any errors if found on rank 0 - VTKM_LOG_IF_S(vtkm::cont::LogLevel::Error, - invalidNumDimensions && (rank == 0), - "The input mesh is " << nDims - << "D. " - "The input data must be either 2D or 3D."); - - // If we found any errors in the setttings than finalize MPI and exit the execution - if (invalidNumDimensions) - { - MPI_Finalize(); - return EXIT_FAILURE; - } - - // Read data - std::vector values(numVertices); - if (filename.compare(filename.length() - 5, 5, ".bdem") == 0) - { - inFile.read(reinterpret_cast(values.data()), - static_cast(numVertices * sizeof(ValueType))); - } - else - { - for (std::size_t vertex = 0; vertex < numVertices; ++vertex) - { - inFile >> values[vertex]; - } - } - - currTime = totalTime.GetElapsedTime(); - dataReadTime = currTime - prevTime; - prevTime = currTime; - - // Create vtk-m data set - vtkm::cont::DataSetBuilderUniform dsb; - vtkm::cont::DataSet ds; - if (nDims == 2) - { - const vtkm::Id2 v_dims{ - static_cast(dims[0]), - static_cast(dims[1]), - }; - const vtkm::Vec v_origin{ static_cast(offset[0]), - static_cast(offset[1]) }; - const vtkm::Vec v_spacing{ 1, 1 }; - ds = dsb.Create(v_dims, v_origin, v_spacing); - } - else - { - VTKM_ASSERT(nDims == 3); - const vtkm::Id3 v_dims{ static_cast(dims[0]), - static_cast(dims[1]), - static_cast(dims[2]) }; - const vtkm::Vec v_origin{ static_cast(offset[0]), - static_cast(offset[1]), - static_cast(offset[2]) }; - vtkm::Vec v_spacing(1, 1, 1); - ds = dsb.Create(v_dims, v_origin, v_spacing); - } - ds.AddPointField("values", values); - // and add to partition 
- useDataSet.AppendPartition(ds); - - localBlockIndicesPortal.Set( - blockNo, - vtkm::Id3{ static_cast(blockIndex[0]), - static_cast(blockIndex[1]), - static_cast(nDims == 3 ? blockIndex[2] : 0) }); - localBlockOriginsPortal.Set(blockNo, - vtkm::Id3{ static_cast(offset[0]), - static_cast(offset[1]), - static_cast(nDims == 3 ? offset[2] : 0) }); - localBlockSizesPortal.Set(blockNo, - vtkm::Id3{ static_cast(dims[0]), - static_cast(dims[1]), - static_cast(nDims == 3 ? dims[2] : 0) }); - - if (blockNo == 0) - { - blocksPerDim = vtkm::Id3{ static_cast(bpd[0]), - static_cast(bpd[1]), - static_cast(nDims == 3 ? bpd[2] : 1) }; - } - } - - // Print the mesh metadata - if (rank == 0) - { - VTKM_LOG_S(exampleLogLevel, - std::endl - << " ---------------- Input Mesh Properties --------------" << std::endl - << " Number of dimensions: " << nDims << std::endl); - } + readOk = readPreSplitFiles( + // inputs + rank, + filename, + blocksPerRank, + // outputs + nDims, + useDataSet, + globalSize, + blocksPerDim, + localBlockIndices, + localBlockOrigins, + localBlockSizes, + // output timers + dataReadTime, + buildDatasetTime); } // Read single-block data and split it for the ranks else { - vtkm::cont::DataSet inDataSet; - // Currently FloatDefualt would be fine, but it could cause problems if we ever - // read binary files here. 
- std::vector values; - std::vector dims; - - // Read BOV data file - if (filename.compare(filename.length() - 3, 3, "bov") == 0) + bool isHDF5 = (0 == filename.compare(filename.length() - 3, 3, ".h5")); + if (isHDF5) { - std::cout << "Reading BOV file" << std::endl; - vtkm::io::BOVDataSetReader reader(filename); - inDataSet = reader.ReadDataSet(); - nDims = 3; - currTime = totalTime.GetElapsedTime(); - dataReadTime = currTime - prevTime; - prevTime = currTime; - // Copy the data into the values array so we can construct a multiblock dataset - // TODO All we should need to do to implement BOV support is to copy the values - // in the values vector and copy the dimensions in the dims vector - vtkm::Id3 pointDimensions; - auto cellSet = inDataSet.GetCellSet(); - vtkm::cont::CastAndCall( - cellSet, vtkm::worklet::contourtree_augmented::GetPointDimensions(), pointDimensions); - std::cout << "Point dimensions are " << pointDimensions << std::endl; - dims.resize(3); - dims[0] = pointDimensions[0]; - dims[1] = pointDimensions[1]; - dims[2] = pointDimensions[2]; - auto tempFieldData = inDataSet.GetField(0).GetData(); - values.resize(static_cast(tempFieldData.GetNumberOfValues())); - auto valuesHandle = vtkm::cont::make_ArrayHandle(values, vtkm::CopyFlag::Off); - vtkm::cont::ArrayCopy(tempFieldData, valuesHandle); - valuesHandle.SyncControlArray(); //Forces values to get updated if copy happened on GPU +#ifdef ENABLE_HDFIO + blocksPerDim = blocksPerDimIn; + readOk = read3DHDF5File( + // inputs + rank, + filename, + dataset_name, + blocksPerRank, + blocksPerDim, + selectSize, + // outputs + nDims, + useDataSet, + globalSize, + localBlockIndices, + localBlockOrigins, + localBlockSizes, + // output timers + dataReadTime, + buildDatasetTime); +#else + VTKM_LOG_S(vtkm::cont::LogLevel::Error, + "Can't read HDF5 file. 
HDF5 reader disabled for this build."); + readOk = false; +#endif } - // Read ASCII data input - else - { - std::cout << "Reading ASCII file" << std::endl; - std::ifstream inFile(filename); - if (inFile.bad()) - return 0; + readOk = readSingleBlockFile( + // inputs + rank, + size, + filename, + numBlocks, + blocksPerRank, + // outputs + nDims, + useDataSet, + globalSize, + blocksPerDim, + localBlockIndices, + localBlockOrigins, + localBlockSizes, + // output timers + dataReadTime, + buildDatasetTime); + } + if (!readOk) + { + MPI_Finalize(); + return EXIT_FAILURE; + } - // Read the dimensions of the mesh, i.e,. number of elementes in x, y, and z - std::string line; - getline(inFile, line); - std::istringstream linestream(line); - vtkm::Id dimVertices; - while (linestream >> dimVertices) - { - dims.push_back(dimVertices); - } - // Swap dimensions so that they are from fastest to slowest growing - // dims[0] -> col; dims[1] -> row, dims[2] ->slice - std::swap(dims[0], dims[1]); - - // Compute the number of vertices, i.e., xdim * ydim * zdim - nDims = static_cast(dims.size()); - std::size_t numVertices = static_cast( - std::accumulate(dims.begin(), dims.end(), std::size_t(1), std::multiplies())); - - // Check the the number of dimensiosn is either 2D or 3D - bool invalidNumDimensions = (nDims < 2 || nDims > 3); - // Log any errors if found on rank 0 - VTKM_LOG_IF_S(vtkm::cont::LogLevel::Error, - invalidNumDimensions && (rank == 0), - "The input mesh is " << nDims << "D. 
The input data must be either 2D or 3D."); - // If we found any errors in the setttings than finalize MPI and exit the execution - if (invalidNumDimensions) - { - MPI_Finalize(); - return EXIT_FAILURE; - } - - // Read data - values.resize(numVertices); - for (std::size_t vertex = 0; vertex < numVertices; ++vertex) - { - inFile >> values[vertex]; - } - - // finish reading the data - inFile.close(); - - currTime = totalTime.GetElapsedTime(); - dataReadTime = currTime - prevTime; - prevTime = currTime; - - } // END ASCII Read - - // Print the mesh metadata - if (rank == 0) - { - VTKM_LOG_S(exampleLogLevel, - std::endl - << " ---------------- Input Mesh Properties --------------" << std::endl - << " Number of dimensions: " << nDims); - } - - // Create a multi-block dataset for multi-block DIY-paralle processing - blocksPerDim = nDims == 3 ? vtkm::Id3(1, 1, numBlocks) - : vtkm::Id3(1, numBlocks, 1); // Decompose the data into - globalSize = nDims == 3 ? vtkm::Id3(static_cast(dims[0]), - static_cast(dims[1]), - static_cast(dims[2])) - : vtkm::Id3(static_cast(dims[0]), - static_cast(dims[1]), - static_cast(1)); - std::cout << blocksPerDim << " " << globalSize << std::endl; - { - vtkm::Id lastDimSize = - (nDims == 2) ? static_cast(dims[1]) : static_cast(dims[2]); - if (size > (lastDimSize / 2.)) - { - VTKM_LOG_IF_S(vtkm::cont::LogLevel::Error, - rank == 0, - "Number of ranks too large for data. Use " << lastDimSize / 2 - << "or fewer ranks"); - MPI_Finalize(); - return EXIT_FAILURE; - } - vtkm::Id standardBlockSize = (vtkm::Id)(lastDimSize / numBlocks); - vtkm::Id blockSize = standardBlockSize; - vtkm::Id blockSliceSize = - nDims == 2 ? 
static_cast(dims[0]) : static_cast((dims[0] * dims[1])); - vtkm::Id blockNumValues = blockSize * blockSliceSize; - - vtkm::Id startBlock = blocksPerRank * rank; - vtkm::Id endBlock = startBlock + blocksPerRank; - for (vtkm::Id blockIndex = startBlock; blockIndex < endBlock; ++blockIndex) - { - vtkm::Id localBlockIndex = blockIndex - startBlock; - vtkm::Id blockStart = blockIndex * blockNumValues; - vtkm::Id blockEnd = blockStart + blockNumValues; - if (blockIndex < (numBlocks - 1)) // add overlap between regions - { - blockEnd += blockSliceSize; - } - else - { - blockEnd = lastDimSize * blockSliceSize; - } - vtkm::Id currBlockSize = (vtkm::Id)((blockEnd - blockStart) / blockSliceSize); - - vtkm::cont::DataSetBuilderUniform dsb; - vtkm::cont::DataSet ds; - - // 2D data - if (nDims == 2) - { - vtkm::Id2 vdims; - vdims[0] = static_cast(dims[0]); - vdims[1] = static_cast(currBlockSize); - vtkm::Vec origin(0, blockIndex * blockSize); - vtkm::Vec spacing(1, 1); - ds = dsb.Create(vdims, origin, spacing); - - localBlockIndicesPortal.Set(localBlockIndex, vtkm::Id3(0, blockIndex, 0)); - localBlockOriginsPortal.Set(localBlockIndex, - vtkm::Id3(0, (blockStart / blockSliceSize), 0)); - localBlockSizesPortal.Set(localBlockIndex, - vtkm::Id3(static_cast(dims[0]), currBlockSize, 0)); - } - // 3D data - else - { - vtkm::Id3 vdims; - vdims[0] = static_cast(dims[0]); - vdims[1] = static_cast(dims[1]); - vdims[2] = static_cast(currBlockSize); - vtkm::Vec origin(0, 0, (blockIndex * blockSize)); - vtkm::Vec spacing(1, 1, 1); - ds = dsb.Create(vdims, origin, spacing); - - localBlockIndicesPortal.Set(localBlockIndex, vtkm::Id3(0, 0, blockIndex)); - localBlockOriginsPortal.Set(localBlockIndex, - vtkm::Id3(0, 0, (blockStart / blockSliceSize))); - localBlockSizesPortal.Set(localBlockIndex, - vtkm::Id3(static_cast(dims[0]), - static_cast(dims[1]), - currBlockSize)); - } - - std::vector subValues((values.begin() + blockStart), - (values.begin() + blockEnd)); - - ds.AddPointField("values", 
subValues); - useDataSet.AppendPartition(ds); - } - } + // Print the mesh metadata + if (rank == 0) + { + VTKM_LOG_S(exampleLogLevel, + std::endl + << " ---------------- Input Mesh Properties --------------" << std::endl + << " Number of dimensions: " << nDims); } // Check if marching cubes is enabled for non 3D data @@ -852,9 +577,8 @@ int main(int argc, char* argv[]) return EXIT_FAILURE; } - currTime = totalTime.GetElapsedTime(); - buildDatasetTime = currTime - prevTime; - prevTime = currTime; + // reset timer after read. the dataReadTime and buildDatasetTime are measured by the read functions + prevTime = totalTime.GetElapsedTime(); // Make sure that all ranks have started up before we start the data read MPI_Barrier(comm); diff --git a/examples/contour_tree_distributed/ContourTreeAppDataIO.h b/examples/contour_tree_distributed/ContourTreeAppDataIO.h new file mode 100644 index 000000000..20c74dba7 --- /dev/null +++ b/examples/contour_tree_distributed/ContourTreeAppDataIO.h @@ -0,0 +1,806 @@ +//============================================================================ +// Copyright (c) Kitware, Inc. +// All rights reserved. +// See LICENSE.txt for details. +// This software is distributed WITHOUT ANY WARRANTY; without even +// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR +// PURPOSE. See the above copyright notice for more information. +// +// Copyright 2014 National Technology & Engineering Solutions of Sandia, LLC (NTESS). +// Copyright 2014 UT-Battelle, LLC. +// Copyright 2014 Los Alamos National Security. +// +// Under the terms of Contract DE-NA0003525 with NTESS, +// the U.S. Government retains certain rights in this software. +// +// Under the terms of Contract DE-AC52-06NA25396 with Los Alamos National +// Laboratory (LANL), the U.S. Government retains certain rights in +// this software. 
+//============================================================================ +// Copyright (c) 2018, The Regents of the University of California, through +// Lawrence Berkeley National Laboratory (subject to receipt of any required approvals +// from the U.S. Dept. of Energy). All rights reserved. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// (1) Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// +// (2) Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// (3) Neither the name of the University of California, Lawrence Berkeley National +// Laboratory, U.S. Dept. of Energy nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +// IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, +// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED +// OF THE POSSIBILITY OF SUCH DAMAGE. 
+// +//============================================================================= +// +// I/O functions used by the ContourTreeApp for data read. +//============================================================================== + +#ifndef vtk_m_examples_ContourTreeAppDataIO_hxx +#define vtk_m_examples_ContourTreeAppDataIO_hxx + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#ifdef ENABLE_HDFIO +#include "H5Cpp.h" +using namespace H5; + + + +/// Convert a 3D index of a cube to rank index +vtkm::Id to1DIndex(vtkm::Id3 idx, vtkm::Id3 dims) +{ + return (idx[2] * dims[0] * dims[1]) + (idx[1] * dims[0]) + idx[0]; +} + +/// Convert the rank index to the index of the cube +vtkm::Id3 to3DIndex(vtkm::Id idx, vtkm::Id3 dims) +{ + vtkm::Id3 res; + res[2] = idx / (dims[0] * dims[1]); + idx -= (res[2] * dims[0] * dims[1]); + res[1] = idx / dims[0]; + res[0] = idx % dims[0]; + return res; +} + + +/// Read data from pre-split ASCII files +/// @param[in] rank The current MPI rank the function is called from +/// @param[in] filename Name of the file with %d as placeholder for the integer index of the block +/// @param[in] dataset_name Name of the dataset in the HDF5 file to read +/// @param[in] blocksPerRank Number of data blocks to process on each rank +/// @param[in] blocksPerDim Number of data blocks to use per dimension +/// @param[in] selectSize Select subset of this size from the dataset. 
Set to (-1,-1,-1) to select the full size
+/// @param[out] nDims Number of data dimensions (i.e., 2 or 3)
+/// @param[out] useDataSet VTKm partitioned dataset to be used with the distributed contour tree filter
+/// @param[out] globalSize Global extents of the input mesh (i.e., number of mesh points in each dimension)
+/// @param[out] localBlockIndices Array with the (x,y,z) index of each local data block
+/// with respect to blocksPerDim
+/// @param[out] localBlockOrigins Array with the (x,y,z) origin (with regard to mesh index) of each
+/// local data block
+/// @param[out] localBlockSizes Array with the sizes (i.e., extents in number of mesh points) of each
+/// local data block
+/// @param[out] dataReadTime Time to read the data
+/// @param[out] buildDatasetTime Time to construct the VTKm datasets
+/// @returns bool indicating whether the read was successful or not
+template
+bool read3DHDF5File(const int& mpi_rank,
+ const std::string& filename,
+ const std::string& dataset_name,
+ const int& blocksPerRank,
+ const vtkm::Id3& blocksPerDim,
+ const vtkm::Id3 selectSize,
+ std::vector::size_type& nDims,
+ vtkm::cont::PartitionedDataSet& useDataSet,
+ vtkm::Id3& globalSize,
+ vtkm::cont::ArrayHandle& localBlockIndices,
+ vtkm::cont::ArrayHandle& localBlockOrigins,
+ vtkm::cont::ArrayHandle& localBlockSizes,
+ vtkm::Float64& dataReadTime,
+ vtkm::Float64& buildDatasetTime)
+{
+ vtkm::cont::Timer totalTime;
+ totalTime.Start();
+ vtkm::Float64 prevTime = 0;
+ vtkm::Float64 currTime = 0;
+
+ // TODO not supported yet
+ if (blocksPerRank > 1)
+ {
+ VTKM_LOG_S(
+ vtkm::cont::LogLevel::Error,
+ "HDF5 reader for ContourTreeDistributed does not support multiple blocks per rank yet");
+ return false;
+ }
+ vtkm::Id blockNo = 0; // TODO: Update this if we have multiple blocks per rank
+
+ localBlockIndices.Allocate(blocksPerRank);
+ localBlockOrigins.Allocate(blocksPerRank);
+ localBlockSizes.Allocate(blocksPerRank);
+ auto localBlockIndicesPortal = 
localBlockIndices.WritePortal(); + auto localBlockOriginsPortal = localBlockOrigins.WritePortal(); + auto localBlockSizesPortal = localBlockSizes.WritePortal(); + + herr_t status; + // Open the file and the dataset + hid_t file = H5Fopen(filename.c_str(), H5F_ACC_RDONLY, H5P_DEFAULT); + hid_t dataset = H5Dopen(file, dataset_name.c_str(), H5P_DEFAULT); + // Get filespace for rank and dimension + hid_t filespace = H5Dget_space(dataset); + // Get number of dimensions in the file dataspace + nDims = H5Sget_simple_extent_ndims(filespace); + if (nDims != 3) + { + VTKM_LOG_S(vtkm::cont::LogLevel::Error, + "HDF5 reader for ContourTreeDistributed requires 3D dataset"); + return false; + } + hsize_t dims[nDims]; // dataset dimensions + globalSize[0] = selectSize[0] < 0 ? dims[0] : selectSize[0]; + globalSize[1] = selectSize[1] < 0 ? dims[1] : selectSize[1]; + globalSize[2] = selectSize[2] < 0 ? dims[2] : selectSize[2]; + // Define the memory space to read dataset. + hid_t dataspace = H5Dget_space(dataset); + // Read a hyperslap + // define the hyperslap + hsize_t count[2]; // size of the hyperslab in the file + hsize_t offset[3]; // hyperslab offset in the file + + // Compute the origin and count + vtkm::Id3 blockSize(vtkm::Id(globalSize[0] / blocksPerDim[0]), + vtkm::Id(globalSize[1] / blocksPerDim[1]), + vtkm::Id(globalSize[2] / blocksPerDim[2])); + vtkm::Id3 blockIndex = to3DIndex(mpi_rank, blocksPerDim); + // compute the offset and count for the block for this rank + offset[0] = blockSize[0] * blockIndex[0]; + offset[1] = blockSize[1] * blockIndex[1]; + offset[1] = blockSize[2] * blockIndex[2]; + count[0] = blockSize[0]; + count[1] = blockSize[1]; + count[2] = blockSize[2]; + if (offset[0] + count[0] > globalSize[0]) + { + count[0] = globalSize[0] - offset[0]; + } + if (offset[1] + count[1] > globalSize[1]) + { + count[1] = globalSize[1] - offset[1]; + } + if (offset[2] + count[2] > globalSize[2]) + { + count[2] = globalSize[2] - offset[2]; + } + // Select the data in 
the dataset + status = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, offset, NULL, count, NULL); + // Define the memory space for reading + hid_t memspace = H5Screate_simple(nDims, count, NULL); + // Define the hyperslap to read the data into memory + hsize_t offset_out[3]; + offset_out[0] = 0; + offset_out[1] = 0; + offset_out[2] = 0; + status = H5Sselect_hyperslab(memspace, H5S_SELECT_SET, offset_out, NULL, count, NULL); + // Read data from hyperslab in the file into the hyperslab in + std::size_t numVertices = count[0] * count[1] * count[2]; + std::vector values(numVertices); + { + if (H5Tequal(H5Dget_type(dataset), H5T_NATIVE_DOUBLE)) + { + double data_out[count[0]][count[1]][count[2]]; // output buffer + status = H5Dread(dataset, H5T_NATIVE_DOUBLE, memspace, dataspace, H5P_DEFAULT, data_out); + // Copy data to 1D array of the expected ValueType + for (vtkm::Id k = 0; k < globalSize[0]; k++) + { + for (vtkm::Id j = 0; j < globalSize[1]; j++) + { + for (vtkm::Id i = 0; i < globalSize[2]; i++) + { + values[to1DIndex(vtkm::Id3(k, j, i), globalSize)] = ValueType(data_out[k][j][i]); + } + } + } + } + else if (H5Tequal(H5Dget_type(dataset), H5T_NATIVE_INT)) + { + int data_out[count[0]][count[1]][count[2]]; // output buffer + status = H5Dread(dataset, H5T_NATIVE_INT, memspace, dataspace, H5P_DEFAULT, data_out); + // Copy data to 1D array of the expected ValueType + for (vtkm::Id k = 0; k < globalSize[0]; k++) + { + for (vtkm::Id j = 0; j < globalSize[1]; j++) + { + for (vtkm::Id i = 0; i < globalSize[2]; i++) + { + values[to1DIndex(vtkm::Id3(k, j, i), globalSize)] = ValueType(data_out[k][j][i]); + } + } + } + } + else if (H5Tequal(H5Dget_type(dataset), H5T_NATIVE_UCHAR)) + { + unsigned char data_out[count[0]][count[1]][count[2]]; // output buffer + status = H5Dread(dataset, H5T_NATIVE_UCHAR, memspace, dataspace, H5P_DEFAULT, data_out); + // Copy data to 1D array of the expected ValueType + for (vtkm::Id k = 0; k < globalSize[0]; k++) + { + for (vtkm::Id j = 0; j < 
globalSize[1]; j++) + { + for (vtkm::Id i = 0; i < globalSize[2]; i++) + { + values[to1DIndex(vtkm::Id3(k, j, i), globalSize)] = ValueType(data_out[k][j][i]); + } + } + } + } + } + // Release HDF5 resources + H5Sclose(dataspace); + H5Dclose(dataset); + H5Fclose(file); + + // Create vtk-m data set + vtkm::cont::DataSetBuilderUniform dsb; + vtkm::cont::DataSet ds; + VTKM_ASSERT(nDims == 3); + const vtkm::Id3 v_dims{ static_cast(count[0]), + static_cast(count[1]), + static_cast(count[2]) }; + const vtkm::Vec v_origin{ static_cast(offset[0]), + static_cast(offset[1]), + static_cast(offset[2]) }; + vtkm::Vec v_spacing(1, 1, 1); + ds = dsb.Create(v_dims, v_origin, v_spacing); + ds.AddPointField("values", values); + // and add to partition + useDataSet.AppendPartition(ds); + localBlockIndicesPortal.Set(blockNo, blockIndex); + localBlockOriginsPortal.Set(blockNo, + vtkm::Id3{ static_cast(offset[0]), + static_cast(offset[1]), + static_cast(offset[2]) }); + localBlockSizesPortal.Set(blockNo, + vtkm::Id3{ static_cast(count[0]), + static_cast(count[1]), + static_cast(count[2]) }); + + // Finished data read + currTime = totalTime.GetElapsedTime(); + dataReadTime = currTime - prevTime; + prevTime = currTime; + + currTime = totalTime.GetElapsedTime(); + buildDatasetTime = currTime - prevTime; + return false; +} +#endif + + + +/// Read data from pre-split ASCII files +/// @param[in] rank The current MPI rank the function is called from +/// @param[in] filename Name of the file with %d as placeholder for the integer index of the block +/// @param[in] blocksPerRank Number of data blocks to process on each rank +/// @param[out] nDims Number of data dimensions (i.e, 2 or 3) +/// @param[out] useDataSet VTKm partioned dataset to be used with the distributed contour tree filter +/// @param[out] globalSize Global extends of the input mesh (i.e., number of mesh points in each dimension) +/// @param[in] blocksPerDim Number of data blocks used in each data dimension +/// @param[in] 
localBlockIndices Array with the (x,y,z) index of each local data block with +/// with respect to blocksPerDim +/// @param[in] localBlockOrigins Array with the (x,y,z) origin (with regard to mesh index) of each +/// local data block +/// @param[in] localBlockSizes Array with the sizes (i.e., extends in number of mesh points) of each +/// local data block +/// @param[in] dataReadTime Time to read the data +/// @param[in] buildDatasetTime Time to construct the VTKm datasets +/// @returns bool indicating whether the read was successful or not +template +bool readPreSplitFiles(const int& rank, + const std::string& filename, + const int& blocksPerRank, + std::vector::size_type& nDims, + vtkm::cont::PartitionedDataSet& useDataSet, + vtkm::Id3& globalSize, + vtkm::Id3& blocksPerDim, + vtkm::cont::ArrayHandle& localBlockIndices, + vtkm::cont::ArrayHandle& localBlockOrigins, + vtkm::cont::ArrayHandle& localBlockSizes, + vtkm::Float64& dataReadTime, + vtkm::Float64& buildDatasetTime) +{ + vtkm::cont::Timer totalTime; + totalTime.Start(); + vtkm::Float64 prevTime = 0; + vtkm::Float64 currTime = 0; + + localBlockIndices.Allocate(blocksPerRank); + localBlockOrigins.Allocate(blocksPerRank); + localBlockSizes.Allocate(blocksPerRank); + auto localBlockIndicesPortal = localBlockIndices.WritePortal(); + auto localBlockOriginsPortal = localBlockOrigins.WritePortal(); + auto localBlockSizesPortal = localBlockSizes.WritePortal(); + for (int blockNo = 0; blockNo < blocksPerRank; ++blockNo) + { + // Translate pattern into filename for this block + char block_filename[256]; + snprintf(block_filename, + sizeof(block_filename), + filename.c_str(), + static_cast(rank * blocksPerRank + blockNo)); + std::cout << "Reading file " << block_filename << std::endl; + + // Open file + std::ifstream inFile(block_filename); + if (!inFile.is_open() || inFile.bad()) + { + std::cerr << "Error: Couldn't open file " << block_filename << std::endl; + return false; + } + + // Read header with dimensions + 
std::string line; + std::string tag; + vtkm::Id dimVertices; + + getline(inFile, line); + std::istringstream global_extents_stream(line); + global_extents_stream >> tag; + if (tag != "#GLOBAL_EXTENTS") + { + std::cerr << "Error: Expected #GLOBAL_EXTENTS, got " << tag << std::endl; + return false; + } + + std::vector global_extents; + while (global_extents_stream >> dimVertices) + global_extents.push_back(dimVertices); + + // Swap dimensions so that they are from fastest to slowest growing + // dims[0] -> col; dims[1] -> row, dims[2] ->slice + std::swap(global_extents[0], global_extents[1]); + + if (blockNo == 0) + { // First block: Set globalSize + globalSize = + vtkm::Id3{ static_cast(global_extents[0]), + static_cast(global_extents[1]), + static_cast(global_extents.size() > 2 ? global_extents[2] : 1) }; + } + else + { // All other blocks: Consistency check of globalSize + if (globalSize != + vtkm::Id3{ static_cast(global_extents[0]), + static_cast(global_extents[1]), + static_cast(global_extents.size() > 2 ? global_extents[2] : 1) }) + { + std::cerr << "Error: Global extents mismatch between blocks!" 
<< std::endl; + return false; + } + } + + getline(inFile, line); + std::istringstream offset_stream(line); + offset_stream >> tag; + if (tag != "#OFFSET") + { + std::cerr << "Error: Expected #OFFSET, got " << tag << std::endl; + return false; + } + std::vector offset; + while (offset_stream >> dimVertices) + offset.push_back(dimVertices); + // Swap dimensions so that they are from fastest to slowest growing + // dims[0] -> col; dims[1] -> row, dims[2] ->slice + std::swap(offset[0], offset[1]); + + getline(inFile, line); + std::istringstream bpd_stream(line); + bpd_stream >> tag; + if (tag != "#BLOCKS_PER_DIM") + { + std::cerr << "Error: Expected #BLOCKS_PER_DIM, got " << tag << std::endl; + return false; + } + std::vector bpd; + while (bpd_stream >> dimVertices) + bpd.push_back(dimVertices); + // Swap dimensions so that they are from fastest to slowest growing + // dims[0] -> col; dims[1] -> row, dims[2] ->slice + std::swap(bpd[0], bpd[1]); + + getline(inFile, line); + std::istringstream blockIndex_stream(line); + blockIndex_stream >> tag; + if (tag != "#BLOCK_INDEX") + { + std::cerr << "Error: Expected #BLOCK_INDEX, got " << tag << std::endl; + return false; + } + std::vector blockIndex; + while (blockIndex_stream >> dimVertices) + blockIndex.push_back(dimVertices); + // Swap dimensions so that they are from fastest to slowest growing + // dims[0] -> col; dims[1] -> row, dims[2] ->slice + std::swap(blockIndex[0], blockIndex[1]); + + getline(inFile, line); + std::istringstream linestream(line); + std::vector dims; + while (linestream >> dimVertices) + { + dims.push_back(dimVertices); + } + + if (dims.size() != global_extents.size() || dims.size() != offset.size()) + { + std::cerr << "Error: Dimension mismatch" << std::endl; + return false; + } + // Swap dimensions so that they are from fastest to slowest growing + // dims[0] -> col; dims[1] -> row, dims[2] ->slice + std::swap(dims[0], dims[1]); + + // Compute the number of vertices, i.e., xdim * ydim * zdim + nDims 
= static_cast(dims.size()); + std::size_t numVertices = static_cast( + std::accumulate(dims.begin(), dims.end(), std::size_t(1), std::multiplies())); + + // Check for fatal input errors + // Check that the number of dimensiosn is either 2D or 3D + bool invalidNumDimensions = (nDims < 2 || nDims > 3); + // Log any errors if found on rank 0 + VTKM_LOG_IF_S(vtkm::cont::LogLevel::Error, + invalidNumDimensions && (rank == 0), + "The input mesh is " << nDims + << "D. " + "The input data must be either 2D or 3D."); + + // If we found any errors in the setttings than finalize MPI and exit the execution + if (invalidNumDimensions) + { + return false; + } + + // Read data + std::vector values(numVertices); + if (filename.compare(filename.length() - 5, 5, ".bdem") == 0) + { + inFile.read(reinterpret_cast(values.data()), + static_cast(numVertices * sizeof(ValueType))); + } + else + { + for (std::size_t vertex = 0; vertex < numVertices; ++vertex) + { + inFile >> values[vertex]; + } + } + + currTime = totalTime.GetElapsedTime(); + dataReadTime = currTime - prevTime; + prevTime = currTime; + + // Create vtk-m data set + vtkm::cont::DataSetBuilderUniform dsb; + vtkm::cont::DataSet ds; + if (nDims == 2) + { + const vtkm::Id2 v_dims{ + static_cast(dims[0]), + static_cast(dims[1]), + }; + const vtkm::Vec v_origin{ static_cast(offset[0]), + static_cast(offset[1]) }; + const vtkm::Vec v_spacing{ 1, 1 }; + ds = dsb.Create(v_dims, v_origin, v_spacing); + } + else + { + VTKM_ASSERT(nDims == 3); + const vtkm::Id3 v_dims{ static_cast(dims[0]), + static_cast(dims[1]), + static_cast(dims[2]) }; + const vtkm::Vec v_origin{ static_cast(offset[0]), + static_cast(offset[1]), + static_cast(offset[2]) }; + vtkm::Vec v_spacing(1, 1, 1); + ds = dsb.Create(v_dims, v_origin, v_spacing); + } + ds.AddPointField("values", values); + // and add to partition + useDataSet.AppendPartition(ds); + + localBlockIndicesPortal.Set(blockNo, + vtkm::Id3{ static_cast(blockIndex[0]), + static_cast(blockIndex[1]), + 
static_cast(nDims == 3 ? blockIndex[2] : 0) }); + localBlockOriginsPortal.Set(blockNo, + vtkm::Id3{ static_cast(offset[0]), + static_cast(offset[1]), + static_cast(nDims == 3 ? offset[2] : 0) }); + localBlockSizesPortal.Set(blockNo, + vtkm::Id3{ static_cast(dims[0]), + static_cast(dims[1]), + static_cast(nDims == 3 ? dims[2] : 0) }); + + if (blockNo == 0) + { + blocksPerDim = vtkm::Id3{ static_cast(bpd[0]), + static_cast(bpd[1]), + static_cast(nDims == 3 ? bpd[2] : 1) }; + } + } + currTime = totalTime.GetElapsedTime(); + buildDatasetTime = currTime - prevTime; + return true; +} + + +/// Read data from a single file and split the data into blocks across ranks +/// This is a simple implementation that will read the full data on all ranks +/// and then extract only the relevant subblock for that rank. +/// The function support reading from BOV as well from ASCII files +/// +/// @param[in] rank The current MPI rank the function is called from +/// @param[in] size The number of MPI ranks +/// @param[in] filename Name of the file with %d as placeholder for the integer index of the block +/// @param[in] numBlocks Number of blocks to use during computation +/// @param[in] blocksPerRank Number of data blocks to process on each rank +/// @param[out] nDims Number of data dimensions (i.e, 2 or 3) +/// @param[out] useDataSet VTKm partioned dataset to be used with the distributed contour tree filter +/// @param[out] globalSize Global extends of the input mesh (i.e., number of mesh points in each dimension) +/// @param[in] blocksPerDim Number of data blocks used in each data dimension +/// @param[in] localBlockIndices Array with the (x,y,z) index of each local data block with +/// with respect to blocksPerDim +/// @param[in] localBlockOrigins Array with the (x,y,z) origin (with regard to mesh index) of each +/// local data block +/// @param[in] localBlockSizes Array with the sizes (i.e., extends in number of mesh points) of each +/// local data block +/// @param[in] dataReadTime 
Time to read the data +/// @param[in] buildDatasetTime Time to construct the VTKm datasets +/// @returns bool indicating whether the read was successful or not +template +bool readSingleBlockFile(const int& rank, + const int& size, + const std::string& filename, + const int& numBlocks, + const int& blocksPerRank, + std::vector::size_type& nDims, + vtkm::cont::PartitionedDataSet& useDataSet, + vtkm::Id3& globalSize, + vtkm::Id3& blocksPerDim, + vtkm::cont::ArrayHandle& localBlockIndices, + vtkm::cont::ArrayHandle& localBlockOrigins, + vtkm::cont::ArrayHandle& localBlockSizes, + vtkm::Float64& dataReadTime, + vtkm::Float64& buildDatasetTime) +{ + vtkm::cont::Timer totalTime; + totalTime.Start(); + vtkm::Float64 prevTime = 0; + vtkm::Float64 currTime = 0; + + localBlockIndices.Allocate(blocksPerRank); + localBlockOrigins.Allocate(blocksPerRank); + localBlockSizes.Allocate(blocksPerRank); + auto localBlockIndicesPortal = localBlockIndices.WritePortal(); + auto localBlockOriginsPortal = localBlockOrigins.WritePortal(); + auto localBlockSizesPortal = localBlockSizes.WritePortal(); + + vtkm::cont::DataSet inDataSet; + // TODO: Currently FloatDefault would be fine, but it could cause problems if we ever read binary files here. 
+ std::vector values; + std::vector dims; + + // Read BOV data file + if (filename.compare(filename.length() - 3, 3, "bov") == 0) + { + std::cout << "Reading BOV file" << std::endl; + vtkm::io::BOVDataSetReader reader(filename); + inDataSet = reader.ReadDataSet(); + nDims = 3; + currTime = totalTime.GetElapsedTime(); + dataReadTime = currTime - prevTime; + prevTime = currTime; + // Copy the data into the values array so we can construct a multiblock dataset + vtkm::Id3 pointDimensions; + auto cellSet = inDataSet.GetCellSet(); + vtkm::cont::CastAndCall( + cellSet, vtkm::worklet::contourtree_augmented::GetPointDimensions(), pointDimensions); + std::cout << "Point dimensions are " << pointDimensions << std::endl; + dims.resize(3); + dims[0] = pointDimensions[0]; + dims[1] = pointDimensions[1]; + dims[2] = pointDimensions[2]; + auto tempFieldData = inDataSet.GetField(0).GetData(); + values.resize(static_cast(tempFieldData.GetNumberOfValues())); + auto valuesHandle = vtkm::cont::make_ArrayHandle(values, vtkm::CopyFlag::Off); + vtkm::cont::ArrayCopy(tempFieldData, valuesHandle); + valuesHandle.SyncControlArray(); //Forces values to get updated if copy happened on GPU + } + // Read ASCII data input + else + { + std::cout << "Reading ASCII file" << std::endl; + std::ifstream inFile(filename); + if (inFile.bad()) + return 0; + + // Read the dimensions of the mesh, i.e,. 
number of elementes in x, y, and z + std::string line; + getline(inFile, line); + std::istringstream linestream(line); + vtkm::Id dimVertices; + while (linestream >> dimVertices) + { + dims.push_back(dimVertices); + } + // Swap dimensions so that they are from fastest to slowest growing + // dims[0] -> col; dims[1] -> row, dims[2] ->slice + std::swap(dims[0], dims[1]); + + // Compute the number of vertices, i.e., xdim * ydim * zdim + nDims = static_cast(dims.size()); + std::size_t numVertices = static_cast( + std::accumulate(dims.begin(), dims.end(), std::size_t(1), std::multiplies())); + + // Check the the number of dimensiosn is either 2D or 3D + bool invalidNumDimensions = (nDims < 2 || nDims > 3); + // Log any errors if found on rank 0 + VTKM_LOG_IF_S(vtkm::cont::LogLevel::Error, + invalidNumDimensions && (rank == 0), + "The input mesh is " << nDims << "D. The input data must be either 2D or 3D."); + // If we found any errors in the setttings than finalize MPI and exit the execution + if (invalidNumDimensions) + { + return false; + } + + // Read data + values.resize(numVertices); + for (std::size_t vertex = 0; vertex < numVertices; ++vertex) + { + inFile >> values[vertex]; + } + + // finish reading the data + inFile.close(); + + currTime = totalTime.GetElapsedTime(); + dataReadTime = currTime - prevTime; + prevTime = currTime; + + } // END ASCII Read + + // Create a multi-block dataset for multi-block DIY-paralle processing + blocksPerDim = + nDims == 3 ? vtkm::Id3(1, 1, numBlocks) : vtkm::Id3(1, numBlocks, 1); // Decompose the data into + globalSize = nDims == 3 ? vtkm::Id3(static_cast(dims[0]), + static_cast(dims[1]), + static_cast(dims[2])) + : vtkm::Id3(static_cast(dims[0]), + static_cast(dims[1]), + static_cast(1)); + std::cout << blocksPerDim << " " << globalSize << std::endl; + { + vtkm::Id lastDimSize = + (nDims == 2) ? 
static_cast(dims[1]) : static_cast(dims[2]); + if (size > (lastDimSize / 2.)) + { + VTKM_LOG_IF_S(vtkm::cont::LogLevel::Error, + rank == 0, + "Number of ranks too large for data. Use " << lastDimSize / 2 + << "or fewer ranks"); + return false; + } + vtkm::Id standardBlockSize = (vtkm::Id)(lastDimSize / numBlocks); + vtkm::Id blockSize = standardBlockSize; + vtkm::Id blockSliceSize = + nDims == 2 ? static_cast(dims[0]) : static_cast((dims[0] * dims[1])); + vtkm::Id blockNumValues = blockSize * blockSliceSize; + + vtkm::Id startBlock = blocksPerRank * rank; + vtkm::Id endBlock = startBlock + blocksPerRank; + for (vtkm::Id blockIndex = startBlock; blockIndex < endBlock; ++blockIndex) + { + vtkm::Id localBlockIndex = blockIndex - startBlock; + vtkm::Id blockStart = blockIndex * blockNumValues; + vtkm::Id blockEnd = blockStart + blockNumValues; + if (blockIndex < (numBlocks - 1)) // add overlap between regions + { + blockEnd += blockSliceSize; + } + else + { + blockEnd = lastDimSize * blockSliceSize; + } + vtkm::Id currBlockSize = (vtkm::Id)((blockEnd - blockStart) / blockSliceSize); + + vtkm::cont::DataSetBuilderUniform dsb; + vtkm::cont::DataSet ds; + + // 2D data + if (nDims == 2) + { + vtkm::Id2 vdims; + vdims[0] = static_cast(dims[0]); + vdims[1] = static_cast(currBlockSize); + vtkm::Vec origin(0, blockIndex * blockSize); + vtkm::Vec spacing(1, 1); + ds = dsb.Create(vdims, origin, spacing); + + localBlockIndicesPortal.Set(localBlockIndex, vtkm::Id3(0, blockIndex, 0)); + localBlockOriginsPortal.Set(localBlockIndex, + vtkm::Id3(0, (blockStart / blockSliceSize), 0)); + localBlockSizesPortal.Set(localBlockIndex, + vtkm::Id3(static_cast(dims[0]), currBlockSize, 0)); + } + // 3D data + else + { + vtkm::Id3 vdims; + vdims[0] = static_cast(dims[0]); + vdims[1] = static_cast(dims[1]); + vdims[2] = static_cast(currBlockSize); + vtkm::Vec origin(0, 0, (blockIndex * blockSize)); + vtkm::Vec spacing(1, 1, 1); + ds = dsb.Create(vdims, origin, spacing); + + 
localBlockIndicesPortal.Set(localBlockIndex, vtkm::Id3(0, 0, blockIndex)); + localBlockOriginsPortal.Set(localBlockIndex, + vtkm::Id3(0, 0, (blockStart / blockSliceSize))); + localBlockSizesPortal.Set( + localBlockIndex, + vtkm::Id3(static_cast(dims[0]), static_cast(dims[1]), currBlockSize)); + } + + std::vector subValues((values.begin() + blockStart), + (values.begin() + blockEnd)); + + ds.AddPointField("values", subValues); + useDataSet.AppendPartition(ds); + } + } + currTime = totalTime.GetElapsedTime(); + buildDatasetTime = currTime - prevTime; + return true; +} + +#endif From 8d1d713dce576c167395592833c921f03b8b9ebf Mon Sep 17 00:00:00 2001 From: oruebel Date: Thu, 13 Jan 2022 20:00:58 -0800 Subject: [PATCH 02/29] Update split data 3D to also support export to HDF5 --- .../contour_tree_distributed/split_data_3d.py | 133 ++++++++++++------ 1 file changed, 90 insertions(+), 43 deletions(-) diff --git a/examples/contour_tree_distributed/split_data_3d.py b/examples/contour_tree_distributed/split_data_3d.py index fa8942b04..83b311c90 100755 --- a/examples/contour_tree_distributed/split_data_3d.py +++ b/examples/contour_tree_distributed/split_data_3d.py @@ -6,16 +6,24 @@ import sys # For readBOV from functools import reduce import operator +try: + import h5py + USE_HDF = True +except: + USE_HDF = False -# Read a 3D text file from disk into a NumPy array -# ... Plain text def read_file(fn): + """ + Read a 3D plain text file from disk into a NumPy array + """ data = np.fromfile(fn, dtype=float, sep=" ") data = data[3:].reshape((int(data[2]),int(data[0]),int(data[1]))) return data -# ... 
VisItBOV def readBOV(filename): + """ + Read data from a VisIt BOV file + """ with open(filename, 'r') as f: header = dict([(lambda x: (x[0].strip().lower(), x[1].strip()))(l.strip().split(':')) for l in f.readlines()]) if 'data_endian' in header: @@ -32,11 +40,22 @@ def readBOV(filename): return (header['variable'], header['centering'].lower(), np.fromfile(dataname, dtype, count).reshape(tuple(reversed(shape)))) return None -# Save a block from a 3D NumPy array to disk -# Python order is slice, row, col -# TXT file order is row, col, slice -# offset and size are in file order def save_piece(fn, array, offset, n_blocks, block_index, size): + """ + Save a block from a 3D NumPy array to disk. + + Python order is slice, row, col + TXT file order is row, col, slice + offset and size are in file order + + Args: + fn (str): filename + array (np.array) : Array with the full data + offset (tuple) : Tuple of int offsets + n_blocks (tuple) : Tuple of ints with the number of blocks per dimension + block_index (tuple) : Tuple of ints with index of the block + size (tuple) : Tuple of ints with the size of the block in each dimension + """ with open(fn, 'w') as f: perm = [1, 2, 0] f.write('#GLOBAL_EXTENTS ' + ' '.join(map(str, [array.shape[i] for i in perm])) + '\n') @@ -51,51 +70,79 @@ def save_piece(fn, array, offset, n_blocks, block_index, size): np.savetxt(f, array[s, offset[0]:offset[0]+size[0],offset[1]:offset[1]+size[1]], fmt='%.16g') f.write('\n') -# Compute split points for splitting into n blocks def split_points(shape, nblocks): + """ + Compute split points for splitting into n blocks: + + Args: + shape (int): Length of the axis + nblocks (int): Number of blocks to split the axis into + + Return: + List of split points along the axis + """ dx = float(shape-1) / nblocks return [ math.floor(i*dx) for i in range(nblocks)] + [ shape - 1 ] -if len(sys.argv) < 2: - print("Error: Usage split_data_3d.py [| ]", file=sys.stderr) - sys.exit(1) +def save_hdf(filename, data, 
**kwargs): + """ + Save the data to HDF5. + The axes of the data will be transposed and reorded to match the order of save_piece function. -# Parse parameters -in_filename = sys.argv[1] + Args: + filename (str) : Name fo the HDF5 file + data (np.array): 3D array with the data + kwargs (dict) : Dict with keyword arguments for the h5py create_dataset function + """ + f = h5py.File(filename, 'w') + f.create_dataset(name='data', data=np.swapaxes(np.transpose(data), 0, 1), **kwargs) -name, ext = os.path.splitext(in_filename) -#out_filename_pattern = name + '_split_%d.txt' -out_filename_pattern = sys.argv[2] +if __name__ == '__main__': -n_blocks = (2, 2, 2) -if len(sys.argv) > 3: - if len(sys.argv) >= 6: - n_blocks = (int(sys.argv[3]), int(sys.argv[4]), int(sys.argv[5])) + if len(sys.argv) < 2: + print("Error: Usage split_data_3d.py [| ]", file=sys.stderr) + sys.exit(1) + + # Parse parameters + in_filename = sys.argv[1] + + name, ext = os.path.splitext(in_filename) + #out_filename_pattern = name + '_split_%d.txt' + out_filename_pattern = sys.argv[2] + + n_blocks = (2, 2, 2) + if len(sys.argv) > 3: + if len(sys.argv) >= 6: + n_blocks = (int(sys.argv[3]), int(sys.argv[4]), int(sys.argv[5])) + else: + n_blocks = (int(sys.argv[3]), int(sys.argv[3]), int(sys.argv[3])) + + # Read data + if ext == '.bov': + data = readBOV(in_filename)[2] else: - n_blocks = (int(sys.argv[3]), int(sys.argv[3]), int(sys.argv[3])) + data = read_file(in_filename) -# Read data -if ext == '.bov': - data = readBOV(in_filename)[2] -else: - data = read_file(in_filename) + # export to hdf5 as well + if USE_HDF: + save_hdf((out_filename_pattern % 0).replace('.txt', '.h5'), data) -# Python order is slice, row, col -# Compute split points -split_points_s = split_points(data.shape[0], n_blocks[2]) -split_points_r = split_points(data.shape[1], n_blocks[0]) -split_points_c = split_points(data.shape[2], n_blocks[1]) + # Python order is slice, row, col + # Compute split points + split_points_s = 
split_points(data.shape[0], n_blocks[2]) + split_points_r = split_points(data.shape[1], n_blocks[0]) + split_points_c = split_points(data.shape[2], n_blocks[1]) -# Create the file that records the slice values -slice_filename = name + '_slices.txt' + # Create the file that records the slice values + slice_filename = name + '_slices.txt' -# Save blocks -block_no = 0 -for block_index_s, (s_start, s_stop) in enumerate(zip(split_points_s, split_points_s[1:])): - for block_index_r, (r_start, r_stop) in enumerate(zip(split_points_r, split_points_r[1:])): - for block_index_c, (c_start, c_stop) in enumerate(zip(split_points_c, split_points_c[1:])): - n_s = s_stop - s_start + 1 - n_r = r_stop - r_start + 1 - n_c = c_stop - c_start + 1 - save_piece(out_filename_pattern % block_no, data, (r_start, c_start, s_start), n_blocks, (block_index_r, block_index_c, block_index_s), (n_r, n_c, n_s)) - block_no += 1 + # Save blocks + block_no = 0 + for block_index_s, (s_start, s_stop) in enumerate(zip(split_points_s, split_points_s[1:])): + for block_index_r, (r_start, r_stop) in enumerate(zip(split_points_r, split_points_r[1:])): + for block_index_c, (c_start, c_stop) in enumerate(zip(split_points_c, split_points_c[1:])): + n_s = s_stop - s_start + 1 + n_r = r_stop - r_start + 1 + n_c = c_stop - c_start + 1 + save_piece(out_filename_pattern % block_no, data, (r_start, c_start, s_start), n_blocks, (block_index_r, block_index_c, block_index_s), (n_r, n_c, n_s)) + block_no += 1 From ae8de9e25d48f3743bf9464dc82210ec6343c8b9 Mon Sep 17 00:00:00 2001 From: oruebel Date: Thu, 13 Jan 2022 20:01:55 -0800 Subject: [PATCH 03/29] Swap HDF5 dims to match presplit data handling --- .../ContourTreeAppDataIO.h | 126 +++++++++++------- 1 file changed, 79 insertions(+), 47 deletions(-) diff --git a/examples/contour_tree_distributed/ContourTreeAppDataIO.h b/examples/contour_tree_distributed/ContourTreeAppDataIO.h index 20c74dba7..cdc232e0d 100644 --- 
a/examples/contour_tree_distributed/ContourTreeAppDataIO.h +++ b/examples/contour_tree_distributed/ContourTreeAppDataIO.h @@ -83,11 +83,12 @@ using namespace H5; - /// Convert a 3D index of a cube to rank index vtkm::Id to1DIndex(vtkm::Id3 idx, vtkm::Id3 dims) { - return (idx[2] * dims[0] * dims[1]) + (idx[1] * dims[0]) + idx[0]; + // return (idx[2] * dims[0] * dims[1]) + (idx[1] * dims[0]) + idx[0]; + // Swap first and second dimension + return (idx[2] * dims[0] * dims[1]) + (idx[0] * dims[1]) + idx[1]; } /// Convert the rank index to the index of the cube @@ -96,8 +97,9 @@ vtkm::Id3 to3DIndex(vtkm::Id idx, vtkm::Id3 dims) vtkm::Id3 res; res[2] = idx / (dims[0] * dims[1]); idx -= (res[2] * dims[0] * dims[1]); - res[1] = idx / dims[0]; - res[0] = idx % dims[0]; + // Swap index 0 and 1 + res[0] = idx / dims[0]; + res[1] = idx % dims[0]; return res; } @@ -127,7 +129,7 @@ bool read3DHDF5File(const int& mpi_rank, const std::string& dataset_name, const int& blocksPerRank, const vtkm::Id3& blocksPerDim, - const vtkm::Id3 selectSize, + const vtkm::Id3& selectSize, std::vector::size_type& nDims, vtkm::cont::PartitionedDataSet& useDataSet, vtkm::Id3& globalSize, @@ -160,8 +162,14 @@ bool read3DHDF5File(const int& mpi_rank, auto localBlockSizesPortal = localBlockSizes.WritePortal(); herr_t status; + //Set up file access property list with parallel I/O access + //hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS); + //MPI_Comm comm = MPI_COMM_WORLD; + //MPI_Info info = MPI_INFO_NULL; + //H5Pset_fapl_mpio(plist_id, comm, info); + // Open the file and the dataset - hid_t file = H5Fopen(filename.c_str(), H5F_ACC_RDONLY, H5P_DEFAULT); + hid_t file = H5Fopen(filename.c_str(), H5F_ACC_RDONLY, H5P_DEFAULT); // plist_id);// hid_t dataset = H5Dopen(file, dataset_name.c_str(), H5P_DEFAULT); // Get filespace for rank and dimension hid_t filespace = H5Dget_space(dataset); @@ -174,6 +182,7 @@ bool read3DHDF5File(const int& mpi_rank, return false; } hsize_t dims[nDims]; // dataset dimensions + 
status = H5Sget_simple_extent_dims(filespace, dims, NULL); globalSize[0] = selectSize[0] < 0 ? dims[0] : selectSize[0]; globalSize[1] = selectSize[1] < 0 ? dims[1] : selectSize[1]; globalSize[2] = selectSize[2] < 0 ? dims[2] : selectSize[2]; @@ -181,21 +190,37 @@ bool read3DHDF5File(const int& mpi_rank, hid_t dataspace = H5Dget_space(dataset); // Read a hyperslap // define the hyperslap - hsize_t count[2]; // size of the hyperslab in the file + hsize_t count[3]; // size of the hyperslab in the file hsize_t offset[3]; // hyperslab offset in the file - // Compute the origin and count - vtkm::Id3 blockSize(vtkm::Id(globalSize[0] / blocksPerDim[0]), - vtkm::Id(globalSize[1] / blocksPerDim[1]), - vtkm::Id(globalSize[2] / blocksPerDim[2])); + vtkm::Id3 blockSize(std::floor(vtkm::Id(globalSize[0] / blocksPerDim[0])), + std::floor(vtkm::Id(globalSize[1] / blocksPerDim[1])), + std::floor(vtkm::Id(globalSize[2] / blocksPerDim[2]))); vtkm::Id3 blockIndex = to3DIndex(mpi_rank, blocksPerDim); // compute the offset and count for the block for this rank offset[0] = blockSize[0] * blockIndex[0]; offset[1] = blockSize[1] * blockIndex[1]; - offset[1] = blockSize[2] * blockIndex[2]; + offset[2] = blockSize[2] * blockIndex[2]; count[0] = blockSize[0]; count[1] = blockSize[1]; count[2] = blockSize[2]; + // add ghost zone on the left + if (blockIndex[0] > 0) + { + offset[0] = offset[0] - 1; + count[0] = count[0] + 1; + } + if (blockIndex[1] > 0) + { + offset[1] = offset[1] - 1; + count[1] = count[1] + 1; + } + if (blockIndex[2] > 0) + { + offset[2] = offset[2] - 1; + count[2] = count[2] + 1; + } + // Check that we are not running over the end of the dataset if (offset[0] + count[0] > globalSize[0]) { count[0] = globalSize[0] - offset[0]; @@ -208,16 +233,16 @@ bool read3DHDF5File(const int& mpi_rank, { count[2] = globalSize[2] - offset[2]; } - // Select the data in the dataset + blockSize = vtkm::Id3{ static_cast(count[0]), + static_cast(count[1]), + static_cast(count[2]) }; + vtkm::Id3 
blockOrigin = vtkm::Id3{ static_cast(offset[0]), + static_cast(offset[1]), + static_cast(offset[2]) }; + // Define the hyperslap to read the data into memory status = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, offset, NULL, count, NULL); // Define the memory space for reading hid_t memspace = H5Screate_simple(nDims, count, NULL); - // Define the hyperslap to read the data into memory - hsize_t offset_out[3]; - offset_out[0] = 0; - offset_out[1] = 0; - offset_out[2] = 0; - status = H5Sselect_hyperslab(memspace, H5S_SELECT_SET, offset_out, NULL, count, NULL); // Read data from hyperslab in the file into the hyperslab in std::size_t numVertices = count[0] * count[1] * count[2]; std::vector values(numVertices); @@ -227,13 +252,13 @@ bool read3DHDF5File(const int& mpi_rank, double data_out[count[0]][count[1]][count[2]]; // output buffer status = H5Dread(dataset, H5T_NATIVE_DOUBLE, memspace, dataspace, H5P_DEFAULT, data_out); // Copy data to 1D array of the expected ValueType - for (vtkm::Id k = 0; k < globalSize[0]; k++) + for (vtkm::Id k = 0; k < count[0]; k++) { - for (vtkm::Id j = 0; j < globalSize[1]; j++) + for (vtkm::Id j = 0; j < count[1]; j++) { - for (vtkm::Id i = 0; i < globalSize[2]; i++) + for (vtkm::Id i = 0; i < count[2]; i++) { - values[to1DIndex(vtkm::Id3(k, j, i), globalSize)] = ValueType(data_out[k][j][i]); + values[to1DIndex(vtkm::Id3(k, j, i), blockSize)] = ValueType(data_out[k][j][i]); } } } @@ -243,13 +268,13 @@ bool read3DHDF5File(const int& mpi_rank, int data_out[count[0]][count[1]][count[2]]; // output buffer status = H5Dread(dataset, H5T_NATIVE_INT, memspace, dataspace, H5P_DEFAULT, data_out); // Copy data to 1D array of the expected ValueType - for (vtkm::Id k = 0; k < globalSize[0]; k++) + for (vtkm::Id k = 0; k < count[0]; k++) { - for (vtkm::Id j = 0; j < globalSize[1]; j++) + for (vtkm::Id j = 0; j < count[1]; j++) { - for (vtkm::Id i = 0; i < globalSize[2]; i++) + for (vtkm::Id i = 0; i < count[2]; i++) { - 
values[to1DIndex(vtkm::Id3(k, j, i), globalSize)] = ValueType(data_out[k][j][i]); + values[to1DIndex(vtkm::Id3(k, j, i), blockSize)] = ValueType(data_out[k][j][i]); } } } @@ -259,13 +284,13 @@ bool read3DHDF5File(const int& mpi_rank, unsigned char data_out[count[0]][count[1]][count[2]]; // output buffer status = H5Dread(dataset, H5T_NATIVE_UCHAR, memspace, dataspace, H5P_DEFAULT, data_out); // Copy data to 1D array of the expected ValueType - for (vtkm::Id k = 0; k < globalSize[0]; k++) + for (vtkm::Id k = 0; k < count[0]; k++) { - for (vtkm::Id j = 0; j < globalSize[1]; j++) + for (vtkm::Id j = 0; j < count[1]; j++) { - for (vtkm::Id i = 0; i < globalSize[2]; i++) + for (vtkm::Id i = 0; i < count[2]; i++) { - values[to1DIndex(vtkm::Id3(k, j, i), globalSize)] = ValueType(data_out[k][j][i]); + values[to1DIndex(vtkm::Id3(k, j, i), blockSize)] = ValueType(data_out[k][j][i]); } } } @@ -280,27 +305,33 @@ bool read3DHDF5File(const int& mpi_rank, vtkm::cont::DataSetBuilderUniform dsb; vtkm::cont::DataSet ds; VTKM_ASSERT(nDims == 3); - const vtkm::Id3 v_dims{ static_cast(count[0]), - static_cast(count[1]), - static_cast(count[2]) }; - const vtkm::Vec v_origin{ static_cast(offset[0]), - static_cast(offset[1]), + + /*const vtkm::Vec v_origin{ static_cast(offset[0]), + static_cast(offset[1]), + static_cast(offset[2]) };*/ + // Swap first and second dimenion here as well for consistency + const vtkm::Vec v_origin{ static_cast(offset[1]), + static_cast(offset[0]), static_cast(offset[2]) }; + const vtkm::Id3 v_dims{ static_cast(blockSize[1]), + static_cast(blockSize[0]), + static_cast(blockSize[2]) }; vtkm::Vec v_spacing(1, 1, 1); ds = dsb.Create(v_dims, v_origin, v_spacing); ds.AddPointField("values", values); // and add to partition useDataSet.AppendPartition(ds); - localBlockIndicesPortal.Set(blockNo, blockIndex); - localBlockOriginsPortal.Set(blockNo, - vtkm::Id3{ static_cast(offset[0]), - static_cast(offset[1]), - static_cast(offset[2]) }); - 
localBlockSizesPortal.Set(blockNo, - vtkm::Id3{ static_cast(count[0]), - static_cast(count[1]), - static_cast(count[2]) }); - + // Original order + //localBlockIndicesPortal.Set(blockNo, blockIndex); + //localBlockOriginsPortal.Set(blockNo, blockOrigin); + //localBlockSizesPortal.Set(blockNo, blockSize); + localBlockIndicesPortal.Set(blockNo, vtkm::Id3(blockIndex[1], blockIndex[0], blockIndex[2])); + localBlockOriginsPortal.Set(blockNo, vtkm::Id3(blockOrigin[1], blockOrigin[0], blockOrigin[2])); + localBlockSizesPortal.Set(blockNo, vtkm::Id3(blockSize[1], blockSize[0], blockSize[2])); + // Swap dimensions so that they are from fastest to slowest growing + // dims[0] -> col; dims[1] -> row, dims[2] ->slice + // Swap the dimensions to match the pre-split file reader + globalSize = vtkm::Id3(globalSize[1], globalSize[0], globalSize[2]); // Finished data read currTime = totalTime.GetElapsedTime(); dataReadTime = currTime - prevTime; @@ -308,7 +339,7 @@ bool read3DHDF5File(const int& mpi_rank, currTime = totalTime.GetElapsedTime(); buildDatasetTime = currTime - prevTime; - return false; + return true; } #endif @@ -543,6 +574,7 @@ bool readPreSplitFiles(const int& rank, vtkm::Vec v_spacing(1, 1, 1); ds = dsb.Create(v_dims, v_origin, v_spacing); } + ds.AddPointField("values", values); // and add to partition useDataSet.AppendPartition(ds); From da38ff2a61a7956bfd508cafa5159d4c9da97f98 Mon Sep 17 00:00:00 2001 From: oruebel Date: Thu, 13 Jan 2022 20:04:43 -0800 Subject: [PATCH 04/29] Update distributed app --- .../ContourTreeApp.cxx | 46 ++++++++++--------- 1 file changed, 25 insertions(+), 21 deletions(-) diff --git a/examples/contour_tree_distributed/ContourTreeApp.cxx b/examples/contour_tree_distributed/ContourTreeApp.cxx index d51e6f110..1f086033b 100644 --- a/examples/contour_tree_distributed/ContourTreeApp.cxx +++ b/examples/contour_tree_distributed/ContourTreeApp.cxx @@ -393,11 +393,11 @@ int main(int argc, char* argv[]) << " forwardSummary=" << forwardSummary << 
std::endl << " nblocks=" << numBlocks << std::endl #ifdef ENABLE_HDFIO - << " dataset=" << dataset_name << std::endl + << " dataset=" << dataset_name << " (HDF5 only)" << std::endl << " blocksPerDim=" << blocksPerDimIn[0] << "," << blocksPerDimIn[1] << "," - << blocksPerDimIn[2] << std::endl + << blocksPerDimIn[2] << " (HDF5 only)" << std::endl << " selectSize=" << selectSize[0] << "," << selectSize[1] << "," - << selectSize[2] << std::endl + << selectSize[2] << " (HDF5 only)" << std::endl #endif ); } @@ -528,27 +528,31 @@ int main(int argc, char* argv[]) readOk = false; #endif } - readOk = readSingleBlockFile( - // inputs - rank, - size, - filename, - numBlocks, - blocksPerRank, - // outputs - nDims, - useDataSet, - globalSize, - blocksPerDim, - localBlockIndices, - localBlockOrigins, - localBlockSizes, - // output timers - dataReadTime, - buildDatasetTime); + else + { + readOk = readSingleBlockFile( + // inputs + rank, + size, + filename, + numBlocks, + blocksPerRank, + // outputs + nDims, + useDataSet, + globalSize, + blocksPerDim, + localBlockIndices, + localBlockOrigins, + localBlockSizes, + // output timers + dataReadTime, + buildDatasetTime); + } } if (!readOk) { + VTKM_LOG_S(vtkm::cont::LogLevel::Error, "Data read failed."); MPI_Finalize(); return EXIT_FAILURE; } From a30d115789d4b170b7bb6cce54cb1a7f480b02ba Mon Sep 17 00:00:00 2001 From: oruebel Date: Mon, 7 Mar 2022 03:19:52 -0800 Subject: [PATCH 05/29] Use Parallel HDF5 and fix axis swap bug --- .../ContourTreeApp.cxx | 41 ++++++++++++++++- .../ContourTreeAppDataIO.h | 45 ++++++++++++++----- 2 files changed, 74 insertions(+), 12 deletions(-) diff --git a/examples/contour_tree_distributed/ContourTreeApp.cxx b/examples/contour_tree_distributed/ContourTreeApp.cxx index 1f086033b..6a3f683d4 100644 --- a/examples/contour_tree_distributed/ContourTreeApp.cxx +++ b/examples/contour_tree_distributed/ContourTreeApp.cxx @@ -100,7 +100,7 @@ VTKM_THIRDPARTY_POST_INCLUDE #include #include -using ValueType = 
vtkm::Float64; +using ValueType = vtkm::Float32; #define SINGLE_FILE_STDOUT_STDERR @@ -441,6 +441,30 @@ int main(int argc, char* argv[]) return 255; } + VTKM_LOG_S(exampleLogLevel, + std::endl + << " ------------ Settings -----------" << std::endl + << " filename=" << filename << std::endl + << " preSplitFiles=" << preSplitFiles << std::endl + << " device=" << device.GetName() << std::endl + << " mc=" << useMarchingCubes << std::endl + << " useFullBoundary=" << !useBoundaryExtremaOnly << std::endl + << " saveDot=" << saveDotFiles << std::endl + << " saveOutputData=" << saveOutputData << std::endl + << " forwardSummary=" << forwardSummary << std::endl + << " numBlocks=" << numBlocks << std::endl + << " augmentHierarchicalTree=" << augmentHierarchicalTree << std::endl + << " numRanks=" << size << std::endl + << " rank=" << rank << std::endl +#ifdef ENABLE_HDFIO + << " dataset=" << dataset_name << " (HDF5 only)" << std::endl + << " blocksPerDim=" << blocksPerDimIn[0] << "," << blocksPerDimIn[1] << "," + << blocksPerDimIn[2] << " (HDF5 only)" << std::endl + << " selectSize=" << selectSize[0] << "," << selectSize[1] << "," << selectSize[2] + << " (HDF5 only)" << std::endl +#endif + ); + // Measure our time for startup currTime = totalTime.GetElapsedTime(); vtkm::Float64 startUpTime = currTime - prevTime; @@ -505,7 +529,7 @@ int main(int argc, char* argv[]) #ifdef ENABLE_HDFIO blocksPerDim = blocksPerDimIn; readOk = read3DHDF5File( - // inputs + // inputs (blocksPerDim is being modified to swap dimension to fit we re-ordering of dimension) rank, filename, dataset_name, @@ -590,6 +614,19 @@ int main(int argc, char* argv[]) vtkm::Float64 dataReadSyncTime = currTime - prevTime; prevTime = currTime; + // Log information of the (first) local data block + VTKM_LOG_S(vtkm::cont::LogLevel::Info, + "" << std::setw(42) << std::left << "blockSize" + << ":" << localBlockSizesPortal.Get(0) << std::endl + << std::setw(42) << std::left << "blockOrigin=" << 
localBlockOriginsPortal.Get(0) + << std::endl + << std::setw(42) << std::left << "blockIndices=" << localBlockIndicesPortal.Get(0) + << std::endl + << std::setw(42) << std::left << "blocksPerDim=" << blocksPerDim << std::endl + << std::setw(42) << std::left << "globalSize=" << globalSize << std::endl + + ); + // Convert the mesh of values into contour tree, pairs of vertex ids vtkm::filter::ContourTreeUniformDistributed filter(blocksPerDim, globalSize, diff --git a/examples/contour_tree_distributed/ContourTreeAppDataIO.h b/examples/contour_tree_distributed/ContourTreeAppDataIO.h index cdc232e0d..1d08b608b 100644 --- a/examples/contour_tree_distributed/ContourTreeAppDataIO.h +++ b/examples/contour_tree_distributed/ContourTreeAppDataIO.h @@ -79,8 +79,11 @@ #ifdef ENABLE_HDFIO -#include "H5Cpp.h" -using namespace H5; +// #include "H5Cpp.h" +#include "hdf5.h" +//using namespace H5; + +#include /// Convert a 3D index of a cube to rank index @@ -98,8 +101,12 @@ vtkm::Id3 to3DIndex(vtkm::Id idx, vtkm::Id3 dims) res[2] = idx / (dims[0] * dims[1]); idx -= (res[2] * dims[0] * dims[1]); // Swap index 0 and 1 - res[0] = idx / dims[0]; - res[1] = idx % dims[0]; + // res[0] = idx / dims[0]; + // res[1] = idx % dims[0]; + // Don't swap index here, because this function is used with the original + // HDF5 layout and the 3D index is swapped later on + res[1] = idx / dims[0]; + res[0] = idx % dims[0]; return res; } @@ -128,7 +135,7 @@ bool read3DHDF5File(const int& mpi_rank, const std::string& filename, const std::string& dataset_name, const int& blocksPerRank, - const vtkm::Id3& blocksPerDim, + vtkm::Id3& blocksPerDim, const vtkm::Id3& selectSize, std::vector::size_type& nDims, vtkm::cont::PartitionedDataSet& useDataSet, @@ -163,13 +170,14 @@ bool read3DHDF5File(const int& mpi_rank, herr_t status; //Set up file access property list with parallel I/O access - //hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS); - //MPI_Comm comm = MPI_COMM_WORLD; - //MPI_Info info = MPI_INFO_NULL; - 
//H5Pset_fapl_mpio(plist_id, comm, info); + hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS); + MPI_Comm comm = MPI_COMM_WORLD; + MPI_Info info = MPI_INFO_NULL; + H5Pset_fapl_mpio(plist_id, comm, info); // Open the file and the dataset - hid_t file = H5Fopen(filename.c_str(), H5F_ACC_RDONLY, H5P_DEFAULT); // plist_id);// + //hid_t file = H5Fopen(filename.c_str(), H5F_ACC_RDONLY, H5P_DEFAULT); // plist_id);// + hid_t file = H5Fopen(filename.c_str(), H5F_ACC_RDONLY, plist_id); // hid_t dataset = H5Dopen(file, dataset_name.c_str(), H5P_DEFAULT); // Get filespace for rank and dimension hid_t filespace = H5Dget_space(dataset); @@ -192,11 +200,13 @@ bool read3DHDF5File(const int& mpi_rank, // define the hyperslap hsize_t count[3]; // size of the hyperslab in the file hsize_t offset[3]; // hyperslab offset in the file + // Compute the origin and count vtkm::Id3 blockSize(std::floor(vtkm::Id(globalSize[0] / blocksPerDim[0])), std::floor(vtkm::Id(globalSize[1] / blocksPerDim[1])), std::floor(vtkm::Id(globalSize[2] / blocksPerDim[2]))); vtkm::Id3 blockIndex = to3DIndex(mpi_rank, blocksPerDim); + // compute the offset and count for the block for this rank offset[0] = blockSize[0] * blockIndex[0]; offset[1] = blockSize[1] * blockIndex[1]; @@ -239,6 +249,7 @@ bool read3DHDF5File(const int& mpi_rank, vtkm::Id3 blockOrigin = vtkm::Id3{ static_cast(offset[0]), static_cast(offset[1]), static_cast(offset[2]) }; + // Define the hyperslap to read the data into memory status = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, offset, NULL, count, NULL); // Define the memory space for reading @@ -321,6 +332,7 @@ bool read3DHDF5File(const int& mpi_rank, ds.AddPointField("values", values); // and add to partition useDataSet.AppendPartition(ds); + // Original order //localBlockIndicesPortal.Set(blockNo, blockIndex); //localBlockOriginsPortal.Set(blockNo, blockOrigin); @@ -332,6 +344,19 @@ bool read3DHDF5File(const int& mpi_rank, // dims[0] -> col; dims[1] -> row, dims[2] ->slice // Swap the 
dimensions to match the pre-split file reader globalSize = vtkm::Id3(globalSize[1], globalSize[0], globalSize[2]); + // Swap also the blocks per dimension accordingly + blocksPerDim = vtkm::Id3(blocksPerDim[1], blocksPerDim[0], blocksPerDim[2]); + + // Log information of the (first) local data block + VTKM_LOG_S(vtkm::cont::LogLevel::Info, + "" << std::setw(42) << std::left << "blockSize" + << ":" << localBlockSizesPortal.Get(0) << std::endl + << std::setw(42) << std::left << "blockOrigin=" << localBlockOriginsPortal.Get(0) + << std::endl + << std::setw(42) << std::left << "blockIndices=" << localBlockIndicesPortal.Get(0) + << std::endl + << std::setw(42) << std::left << "globalSize=" << globalSize << std::endl); + // Finished data read currTime = totalTime.GetElapsedTime(); dataReadTime = currTime - prevTime; From 5aed80d7d3c1ffd03af47d1ffbe95ab39c0c661c Mon Sep 17 00:00:00 2001 From: oruebel Date: Mon, 14 Mar 2022 15:09:43 -0700 Subject: [PATCH 06/29] Fix HDF5 float data read bug --- .../ContourTreeAppDataIO.h | 21 +++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/examples/contour_tree_distributed/ContourTreeAppDataIO.h b/examples/contour_tree_distributed/ContourTreeAppDataIO.h index 1d08b608b..a1bff80a1 100644 --- a/examples/contour_tree_distributed/ContourTreeAppDataIO.h +++ b/examples/contour_tree_distributed/ContourTreeAppDataIO.h @@ -274,6 +274,22 @@ bool read3DHDF5File(const int& mpi_rank, } } } + else if (H5Tequal(H5Dget_type(dataset), H5T_NATIVE_FLOAT)) + { + float data_out[count[0]][count[1]][count[2]]; // output buffer + status = H5Dread(dataset, H5T_NATIVE_FLOAT, memspace, dataspace, H5P_DEFAULT, data_out); + // Copy data to 1D array of the expected ValueType + for (vtkm::Id k = 0; k < count[0]; k++) + { + for (vtkm::Id j = 0; j < count[1]; j++) + { + for (vtkm::Id i = 0; i < count[2]; i++) + { + values[to1DIndex(vtkm::Id3(k, j, i), blockSize)] = ValueType(data_out[k][j][i]); + } + } + } + } else if (H5Tequal(H5Dget_type(dataset), 
H5T_NATIVE_INT)) { int data_out[count[0]][count[1]][count[2]]; // output buffer @@ -306,6 +322,11 @@ bool read3DHDF5File(const int& mpi_rank, } } } + else + { + VTKM_LOG_S(vtkm::cont::LogLevel::Error, "Data type not supported by the example HDF5 reader"); + throw "Data type not supported by the example HDF5 reader"; + } } // Release HDF5 resources H5Sclose(dataspace); From e076c11a26f44ab408f849d578eb6af2d1affc7c Mon Sep 17 00:00:00 2001 From: "Gunther H. Weber" Date: Fri, 1 Jul 2022 14:24:45 -0700 Subject: [PATCH 07/29] Add missing linking to to vtkmio --- vtkm/io/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vtkm/io/CMakeLists.txt b/vtkm/io/CMakeLists.txt index 88016c537..41f86b147 100644 --- a/vtkm/io/CMakeLists.txt +++ b/vtkm/io/CMakeLists.txt @@ -85,7 +85,7 @@ vtkm_library( target_link_libraries(vtkm_io PUBLIC vtkm_cont PRIVATE vtkm_lodepng) if (VTKm_ENABLE_HDF5_IO) target_include_directories(vtkm_io PRIVATE $) - target_link_libraries(vtkm_io PRIVATE ${HDF5_HL_LIBRARIES}) + target_link_libraries(vtkm_io PRIVATE ${HDF5_HL_LIBRARIES} ${HDF5_LIBRARIES}) endif() add_subdirectory(internal) From 2f488b485d77c5c0f4734c699a618a06c45ffd24 Mon Sep 17 00:00:00 2001 From: "Gunther H. 
Weber" Date: Fri, 1 Jul 2022 14:25:20 -0700 Subject: [PATCH 08/29] Add missing HDF5 include/link definitions distributed contour tree app --- examples/contour_tree_distributed/CMakeLists.txt | 2 ++ 1 file changed, 2 insertions(+) diff --git a/examples/contour_tree_distributed/CMakeLists.txt b/examples/contour_tree_distributed/CMakeLists.txt index 57997fdb6..7fe91ef0e 100644 --- a/examples/contour_tree_distributed/CMakeLists.txt +++ b/examples/contour_tree_distributed/CMakeLists.txt @@ -78,6 +78,8 @@ if (VTKm_ENABLE_MPI) endif() if (VTKm_ENABLE_HDF5_IO) target_compile_definitions(ContourTree_Distributed PRIVATE "ENABLE_HDFIO") + target_include_directories(ContourTree_Distributed PRIVATE ${HDF5_INCLUDE_DIR}) + target_link_libraries(ContourTree_Distributed ${HDF5_LIBRARIES}) endif () add_executable(TreeCompiler TreeCompilerApp.cxx) From 1b1636b6d8de831f967c79fd5d8f4f548d14a1d4 Mon Sep 17 00:00:00 2001 From: "Gunther H. Weber" Date: Fri, 1 Jul 2022 19:00:57 -0400 Subject: [PATCH 09/29] Update includes for distributed contour tree app to filter refactor --- .../contour_tree_distributed/BranchCompilerApp.cxx | 2 +- examples/contour_tree_distributed/ContourTreeApp.cxx | 10 +++++----- .../contour_tree_distributed/ContourTreeAppDataIO.h | 2 +- examples/contour_tree_distributed/TreeCompilerApp.cxx | 2 +- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/examples/contour_tree_distributed/BranchCompilerApp.cxx b/examples/contour_tree_distributed/BranchCompilerApp.cxx index 67be61ec0..b613a508c 100644 --- a/examples/contour_tree_distributed/BranchCompilerApp.cxx +++ b/examples/contour_tree_distributed/BranchCompilerApp.cxx @@ -65,7 +65,7 @@ // //======================================================================================= -#include +#include int main() { // main() diff --git a/examples/contour_tree_distributed/ContourTreeApp.cxx b/examples/contour_tree_distributed/ContourTreeApp.cxx index 6a3f683d4..6dbbe235f 100644 --- 
a/examples/contour_tree_distributed/ContourTreeApp.cxx +++ b/examples/contour_tree_distributed/ContourTreeApp.cxx @@ -76,11 +76,11 @@ #include #include #include -#include -#include -#include -#include -#include +#include +#include +#include +#include +#include // clang-format off VTKM_THIRDPARTY_PRE_INCLUDE diff --git a/examples/contour_tree_distributed/ContourTreeAppDataIO.h b/examples/contour_tree_distributed/ContourTreeAppDataIO.h index a1bff80a1..3662997ee 100644 --- a/examples/contour_tree_distributed/ContourTreeAppDataIO.h +++ b/examples/contour_tree_distributed/ContourTreeAppDataIO.h @@ -65,7 +65,7 @@ #include #include -#include +#include #include #include diff --git a/examples/contour_tree_distributed/TreeCompilerApp.cxx b/examples/contour_tree_distributed/TreeCompilerApp.cxx index 75665010f..319fce902 100644 --- a/examples/contour_tree_distributed/TreeCompilerApp.cxx +++ b/examples/contour_tree_distributed/TreeCompilerApp.cxx @@ -61,7 +61,7 @@ //============================================================================== #include -#include +#include // main routine int main(int argc, char** argv) From a8786cd9782d9b3349989c68560b719b983af228 Mon Sep 17 00:00:00 2001 From: "Gunther H. 
Weber" Date: Fri, 1 Jul 2022 19:01:21 -0400 Subject: [PATCH 10/29] Added missing iomanip include --- .../worklet/contourtree_distributed/BranchCompiler.h | 1 + 1 file changed, 1 insertion(+) diff --git a/vtkm/filter/scalar_topology/worklet/contourtree_distributed/BranchCompiler.h b/vtkm/filter/scalar_topology/worklet/contourtree_distributed/BranchCompiler.h index d874044c6..0b838d3fd 100644 --- a/vtkm/filter/scalar_topology/worklet/contourtree_distributed/BranchCompiler.h +++ b/vtkm/filter/scalar_topology/worklet/contourtree_distributed/BranchCompiler.h @@ -54,6 +54,7 @@ #ifndef _BRANCHCOMPILER_H_ #define _BRANCHCOMPILER_H_ +#include #include #include #include From eda4b1be11eb15a967513ae840186c1b2c09ded2 Mon Sep 17 00:00:00 2001 From: oruebel Date: Wed, 7 Sep 2022 05:11:39 -0700 Subject: [PATCH 11/29] Run branchDecomposition without requiring save and add timer --- .../ContourTreeApp.cxx | 21 ++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/examples/contour_tree_distributed/ContourTreeApp.cxx b/examples/contour_tree_distributed/ContourTreeApp.cxx index 6dbbe235f..56e317fc0 100644 --- a/examples/contour_tree_distributed/ContourTreeApp.cxx +++ b/examples/contour_tree_distributed/ContourTreeApp.cxx @@ -648,22 +648,31 @@ int main(int argc, char* argv[]) vtkm::Float64 computeContourTreeTime = currTime - prevTime; prevTime = currTime; - // Make sure that all ranks have started up before we start the data read + // Record the time to synchronize after the filter has finished MPI_Barrier(comm); currTime = totalTime.GetElapsedTime(); vtkm::Float64 postFilterSyncTime = currTime - prevTime; prevTime = currTime; + // Compute branch decomposition if requested + vtkm::cont::PartitionedDataSet bd_result; + if (computeHierarchicalVolumetricBranchDecomposition) + { + vtkm::filter::scalar_topology::DistributedBranchDecompositionFilter bd_filter( + blocksPerDim, globalSize, localBlockIndices, localBlockOrigins, localBlockSizes); + bd_result = 
bd_filter.Execute(result); + } + currTime = totalTime.GetElapsedTime(); + vtkm::Float64 branchDecompTime = currTime - prevTime; + prevTime = currTime; + + // Save output if (saveOutputData) { if (augmentHierarchicalTree) { if (computeHierarchicalVolumetricBranchDecomposition) { - vtkm::filter::scalar_topology::DistributedBranchDecompositionFilter bd_filter( - blocksPerDim, globalSize, localBlockIndices, localBlockOrigins, localBlockSizes); - auto bd_result = bd_filter.Execute(result); - for (vtkm::Id ds_no = 0; ds_no < result.GetNumberOfPartitions(); ++ds_no) { auto ds = bd_result.GetPartition(ds_no); @@ -780,6 +789,8 @@ int main(int argc, char* argv[]) << ": " << computeContourTreeTime << " seconds" << std::endl << std::setw(42) << std::left << " Post filter Sync" << ": " << postFilterSyncTime << " seconds" << std::endl + << std::setw(42) << std::left << " Branch Decomposition" + << ": " << branchDecompTime << " seconds" << std::endl << std::setw(42) << std::left << " Save Tree Compiler Data" << ": " << saveOutputDataTime << " seconds" << std::endl << std::setw(42) << std::left << " Total Time" From 63ec3f3bcb988bce222053679c728d0408f08c1b Mon Sep 17 00:00:00 2001 From: Oliver Ruebel Date: Tue, 20 Sep 2022 01:22:16 -0700 Subject: [PATCH 12/29] Updated contour tree distributed IO to use CellSetStructured --- .../ContourTreeAppDataIO.h | 999 +++++++++--------- 1 file changed, 489 insertions(+), 510 deletions(-) diff --git a/examples/contour_tree_distributed/ContourTreeAppDataIO.h b/examples/contour_tree_distributed/ContourTreeAppDataIO.h index 3662997ee..eb31ced53 100644 --- a/examples/contour_tree_distributed/ContourTreeAppDataIO.h +++ b/examples/contour_tree_distributed/ContourTreeAppDataIO.h @@ -58,6 +58,7 @@ #include #include #include +#include #include #include #include @@ -123,10 +124,6 @@ vtkm::Id3 to3DIndex(vtkm::Id idx, vtkm::Id3 dims) /// @param[out] globalSize Global extends of the input mesh (i.e., number of mesh points in each dimension) /// @param[out] 
localBlockIndices Array with the (x,y,z) index of each local data block with /// with respect to blocksPerDim -/// @param[out] localBlockOrigins Array with the (x,y,z) origin (with regard to mesh index) of each -/// local data block -/// @param[out] localBlockSizes Array with the sizes (i.e., extends in number of mesh points) of each -/// local data block /// @param[out] dataReadTime Time to read the data /// @param[out] buildDatasetTime Time to construct the VTKm datasets /// @returns bool indicating whether the read was successful or not @@ -141,8 +138,6 @@ bool read3DHDF5File(const int& mpi_rank, vtkm::cont::PartitionedDataSet& useDataSet, vtkm::Id3& globalSize, vtkm::cont::ArrayHandle& localBlockIndices, - vtkm::cont::ArrayHandle& localBlockOrigins, - vtkm::cont::ArrayHandle& localBlockSizes, vtkm::Float64& dataReadTime, vtkm::Float64& buildDatasetTime) { @@ -162,11 +157,7 @@ bool read3DHDF5File(const int& mpi_rank, vtkm::Id blockNo = 0; // TODO: Update this if we have multiple blocks per rank localBlockIndices.Allocate(blocksPerRank); - localBlockOrigins.Allocate(blocksPerRank); - localBlockSizes.Allocate(blocksPerRank); auto localBlockIndicesPortal = localBlockIndices.WritePortal(); - auto localBlockOriginsPortal = localBlockOrigins.WritePortal(); - auto localBlockSizesPortal = localBlockSizes.WritePortal(); herr_t status; //Set up file access property list with parallel I/O access @@ -338,9 +329,13 @@ bool read3DHDF5File(const int& mpi_rank, vtkm::cont::DataSet ds; VTKM_ASSERT(nDims == 3); - /*const vtkm::Vec v_origin{ static_cast(offset[0]), - static_cast(offset[1]), - static_cast(offset[2]) };*/ + // Swap dimensions so that they are from fastest to slowest growing + // dims[0] -> col; dims[1] -> row, dims[2] ->slice + // Swap the dimensions to match the pre-split file reader + globalSize = vtkm::Id3(globalSize[1], globalSize[0], globalSize[2]); + // Swap also the blocks per dimension accordingly + blocksPerDim = vtkm::Id3(blocksPerDim[1], blocksPerDim[0], 
blocksPerDim[2]); + // Swap first and second dimenion here as well for consistency const vtkm::Vec v_origin{ static_cast(offset[1]), static_cast(offset[0]), @@ -350,535 +345,519 @@ bool read3DHDF5File(const int& mpi_rank, static_cast(blockSize[2]) }; vtkm::Vec v_spacing(1, 1, 1); ds = dsb.Create(v_dims, v_origin, v_spacing); - ds.AddPointField("values", values); - // and add to partition - useDataSet.AppendPartition(ds); + vtkm::cont::CellSetStructured<3> cs; + cs.SetPointDimensions(v_dims); + cs.SetGlobalPointDimensions(globalSize); + cs.SetGlobalPointIndexStart(vtkm::Id3{ + v_origin + { + [0], + v_origin {[1], v_origin{ + [2] }); + ds.SetCellSet(cs); - // Original order - //localBlockIndicesPortal.Set(blockNo, blockIndex); - //localBlockOriginsPortal.Set(blockNo, blockOrigin); - //localBlockSizesPortal.Set(blockNo, blockSize); - localBlockIndicesPortal.Set(blockNo, vtkm::Id3(blockIndex[1], blockIndex[0], blockIndex[2])); - localBlockOriginsPortal.Set(blockNo, vtkm::Id3(blockOrigin[1], blockOrigin[0], blockOrigin[2])); - localBlockSizesPortal.Set(blockNo, vtkm::Id3(blockSize[1], blockSize[0], blockSize[2])); - // Swap dimensions so that they are from fastest to slowest growing - // dims[0] -> col; dims[1] -> row, dims[2] ->slice - // Swap the dimensions to match the pre-split file reader - globalSize = vtkm::Id3(globalSize[1], globalSize[0], globalSize[2]); - // Swap also the blocks per dimension accordingly - blocksPerDim = vtkm::Id3(blocksPerDim[1], blocksPerDim[0], blocksPerDim[2]); + ds.AddPointField("values", values); + // and add to partition + useDataSet.AppendPartition(ds); - // Log information of the (first) local data block - VTKM_LOG_S(vtkm::cont::LogLevel::Info, - "" << std::setw(42) << std::left << "blockSize" - << ":" << localBlockSizesPortal.Get(0) << std::endl - << std::setw(42) << std::left << "blockOrigin=" << localBlockOriginsPortal.Get(0) - << std::endl - << std::setw(42) << std::left << "blockIndices=" << localBlockIndicesPortal.Get(0) - << 
std::endl - << std::setw(42) << std::left << "globalSize=" << globalSize << std::endl); + // Swap order to match pre-splot + localBlockIndicesPortal.Set(blockNo, + vtkm::Id3(blockIndex[1], blockIndex[0], blockIndex[2])); - // Finished data read - currTime = totalTime.GetElapsedTime(); - dataReadTime = currTime - prevTime; - prevTime = currTime; + // Log information of the (first) local data block + VTKM_LOG_S(vtkm::cont::LogLevel::Info, + "" << std::setw(42) << std::left << "blockSize" + << ":" << v_dims << std::endl + << std::setw(42) << std::left << "blockOrigin=" << v_origin << std::endl + << std::setw(42) << std::left + << "blockIndices=" << localBlockIndicesPortal.Get(0) << std::endl + << std::setw(42) << std::left << "globalSize=" << globalSize << std::endl); - currTime = totalTime.GetElapsedTime(); - buildDatasetTime = currTime - prevTime; - return true; -} + // Finished data read + currTime = totalTime.GetElapsedTime(); + dataReadTime = currTime - prevTime; + prevTime = currTime; + + currTime = totalTime.GetElapsedTime(); + buildDatasetTime = currTime - prevTime; + return true; + } #endif -/// Read data from pre-split ASCII files -/// @param[in] rank The current MPI rank the function is called from -/// @param[in] filename Name of the file with %d as placeholder for the integer index of the block -/// @param[in] blocksPerRank Number of data blocks to process on each rank -/// @param[out] nDims Number of data dimensions (i.e, 2 or 3) -/// @param[out] useDataSet VTKm partioned dataset to be used with the distributed contour tree filter -/// @param[out] globalSize Global extends of the input mesh (i.e., number of mesh points in each dimension) -/// @param[in] blocksPerDim Number of data blocks used in each data dimension -/// @param[in] localBlockIndices Array with the (x,y,z) index of each local data block with -/// with respect to blocksPerDim -/// @param[in] localBlockOrigins Array with the (x,y,z) origin (with regard to mesh index) of each -/// local data 
block -/// @param[in] localBlockSizes Array with the sizes (i.e., extends in number of mesh points) of each -/// local data block -/// @param[in] dataReadTime Time to read the data -/// @param[in] buildDatasetTime Time to construct the VTKm datasets -/// @returns bool indicating whether the read was successful or not -template -bool readPreSplitFiles(const int& rank, - const std::string& filename, - const int& blocksPerRank, - std::vector::size_type& nDims, - vtkm::cont::PartitionedDataSet& useDataSet, - vtkm::Id3& globalSize, - vtkm::Id3& blocksPerDim, - vtkm::cont::ArrayHandle& localBlockIndices, - vtkm::cont::ArrayHandle& localBlockOrigins, - vtkm::cont::ArrayHandle& localBlockSizes, - vtkm::Float64& dataReadTime, - vtkm::Float64& buildDatasetTime) -{ - vtkm::cont::Timer totalTime; - totalTime.Start(); - vtkm::Float64 prevTime = 0; - vtkm::Float64 currTime = 0; - - localBlockIndices.Allocate(blocksPerRank); - localBlockOrigins.Allocate(blocksPerRank); - localBlockSizes.Allocate(blocksPerRank); - auto localBlockIndicesPortal = localBlockIndices.WritePortal(); - auto localBlockOriginsPortal = localBlockOrigins.WritePortal(); - auto localBlockSizesPortal = localBlockSizes.WritePortal(); - for (int blockNo = 0; blockNo < blocksPerRank; ++blockNo) - { - // Translate pattern into filename for this block - char block_filename[256]; - snprintf(block_filename, - sizeof(block_filename), - filename.c_str(), - static_cast(rank * blocksPerRank + blockNo)); - std::cout << "Reading file " << block_filename << std::endl; - - // Open file - std::ifstream inFile(block_filename); - if (!inFile.is_open() || inFile.bad()) - { - std::cerr << "Error: Couldn't open file " << block_filename << std::endl; - return false; - } - - // Read header with dimensions - std::string line; - std::string tag; - vtkm::Id dimVertices; - - getline(inFile, line); - std::istringstream global_extents_stream(line); - global_extents_stream >> tag; - if (tag != "#GLOBAL_EXTENTS") - { - std::cerr << "Error: 
Expected #GLOBAL_EXTENTS, got " << tag << std::endl; - return false; - } - - std::vector global_extents; - while (global_extents_stream >> dimVertices) - global_extents.push_back(dimVertices); - - // Swap dimensions so that they are from fastest to slowest growing - // dims[0] -> col; dims[1] -> row, dims[2] ->slice - std::swap(global_extents[0], global_extents[1]); - - if (blockNo == 0) - { // First block: Set globalSize - globalSize = - vtkm::Id3{ static_cast(global_extents[0]), - static_cast(global_extents[1]), - static_cast(global_extents.size() > 2 ? global_extents[2] : 1) }; - } - else - { // All other blocks: Consistency check of globalSize - if (globalSize != - vtkm::Id3{ static_cast(global_extents[0]), - static_cast(global_extents[1]), - static_cast(global_extents.size() > 2 ? global_extents[2] : 1) }) + /// Read data from pre-split ASCII files + /// @param[in] rank The current MPI rank the function is called from + /// @param[in] filename Name of the file with %d as placeholder for the integer index of the block + /// @param[in] blocksPerRank Number of data blocks to process on each rank + /// @param[out] nDims Number of data dimensions (i.e, 2 or 3) + /// @param[out] useDataSet VTKm partioned dataset to be used with the distributed contour tree filter + /// @param[out] globalSize Global extends of the input mesh (i.e., number of mesh points in each dimension) + /// @param[in] blocksPerDim Number of data blocks used in each data dimension + /// @param[in] localBlockIndices Array with the (x,y,z) index of each local data block with + /// with respect to blocksPerDim + /// @param[in] dataReadTime Time to read the data + /// @param[in] buildDatasetTime Time to construct the VTKm datasets + /// @returns bool indicating whether the read was successful or not + template + bool readPreSplitFiles(const int& rank, + const std::string& filename, + const int& blocksPerRank, + std::vector::size_type& nDims, + vtkm::cont::PartitionedDataSet& useDataSet, + vtkm::Id3& 
globalSize, + vtkm::Id3& blocksPerDim, + vtkm::cont::ArrayHandle& localBlockIndices, + vtkm::Float64& dataReadTime, + vtkm::Float64& buildDatasetTime) { - std::cerr << "Error: Global extents mismatch between blocks!" << std::endl; - return false; - } - } + vtkm::cont::Timer totalTime; + totalTime.Start(); + vtkm::Float64 prevTime = 0; + vtkm::Float64 currTime = 0; - getline(inFile, line); - std::istringstream offset_stream(line); - offset_stream >> tag; - if (tag != "#OFFSET") - { - std::cerr << "Error: Expected #OFFSET, got " << tag << std::endl; - return false; - } - std::vector offset; - while (offset_stream >> dimVertices) - offset.push_back(dimVertices); - // Swap dimensions so that they are from fastest to slowest growing - // dims[0] -> col; dims[1] -> row, dims[2] ->slice - std::swap(offset[0], offset[1]); + localBlockIndices.Allocate(blocksPerRank); + auto localBlockIndicesPortal = localBlockIndices.WritePortal(); + for (int blockNo = 0; blockNo < blocksPerRank; ++blockNo) + { + // Translate pattern into filename for this block + char block_filename[256]; + snprintf(block_filename, + sizeof(block_filename), + filename.c_str(), + static_cast(rank * blocksPerRank + blockNo)); + std::cout << "Reading file " << block_filename << std::endl; - getline(inFile, line); - std::istringstream bpd_stream(line); - bpd_stream >> tag; - if (tag != "#BLOCKS_PER_DIM") - { - std::cerr << "Error: Expected #BLOCKS_PER_DIM, got " << tag << std::endl; - return false; - } - std::vector bpd; - while (bpd_stream >> dimVertices) - bpd.push_back(dimVertices); - // Swap dimensions so that they are from fastest to slowest growing - // dims[0] -> col; dims[1] -> row, dims[2] ->slice - std::swap(bpd[0], bpd[1]); + // Open file + std::ifstream inFile(block_filename); + if (!inFile.is_open() || inFile.bad()) + { + std::cerr << "Error: Couldn't open file " << block_filename << std::endl; + return false; + } - getline(inFile, line); - std::istringstream blockIndex_stream(line); - 
blockIndex_stream >> tag; - if (tag != "#BLOCK_INDEX") - { - std::cerr << "Error: Expected #BLOCK_INDEX, got " << tag << std::endl; - return false; - } - std::vector blockIndex; - while (blockIndex_stream >> dimVertices) - blockIndex.push_back(dimVertices); - // Swap dimensions so that they are from fastest to slowest growing - // dims[0] -> col; dims[1] -> row, dims[2] ->slice - std::swap(blockIndex[0], blockIndex[1]); + // Read header with dimensions + std::string line; + std::string tag; + vtkm::Id dimVertices; - getline(inFile, line); - std::istringstream linestream(line); - std::vector dims; - while (linestream >> dimVertices) - { - dims.push_back(dimVertices); - } + getline(inFile, line); + std::istringstream global_extents_stream(line); + global_extents_stream >> tag; + if (tag != "#GLOBAL_EXTENTS") + { + std::cerr << "Error: Expected #GLOBAL_EXTENTS, got " << tag << std::endl; + return false; + } - if (dims.size() != global_extents.size() || dims.size() != offset.size()) - { - std::cerr << "Error: Dimension mismatch" << std::endl; - return false; - } - // Swap dimensions so that they are from fastest to slowest growing - // dims[0] -> col; dims[1] -> row, dims[2] ->slice - std::swap(dims[0], dims[1]); + std::vector global_extents; + while (global_extents_stream >> dimVertices) + global_extents.push_back(dimVertices); - // Compute the number of vertices, i.e., xdim * ydim * zdim - nDims = static_cast(dims.size()); - std::size_t numVertices = static_cast( - std::accumulate(dims.begin(), dims.end(), std::size_t(1), std::multiplies())); + // Swap dimensions so that they are from fastest to slowest growing + // dims[0] -> col; dims[1] -> row, dims[2] ->slice + std::swap(global_extents[0], global_extents[1]); - // Check for fatal input errors - // Check that the number of dimensiosn is either 2D or 3D - bool invalidNumDimensions = (nDims < 2 || nDims > 3); - // Log any errors if found on rank 0 - VTKM_LOG_IF_S(vtkm::cont::LogLevel::Error, - invalidNumDimensions 
&& (rank == 0), - "The input mesh is " << nDims - << "D. " - "The input data must be either 2D or 3D."); + if (blockNo == 0) + { // First block: Set globalSize + globalSize = + vtkm::Id3{ static_cast(global_extents[0]), + static_cast(global_extents[1]), + static_cast(global_extents.size() > 2 ? global_extents[2] : 1) }; + } + else + { // All other blocks: Consistency check of globalSize + if (globalSize != + vtkm::Id3{ + static_cast(global_extents[0]), + static_cast(global_extents[1]), + static_cast(global_extents.size() > 2 ? global_extents[2] : 1) }) + { + std::cerr << "Error: Global extents mismatch between blocks!" << std::endl; + return false; + } + } - // If we found any errors in the setttings than finalize MPI and exit the execution - if (invalidNumDimensions) - { - return false; - } + getline(inFile, line); + std::istringstream offset_stream(line); + offset_stream >> tag; + if (tag != "#OFFSET") + { + std::cerr << "Error: Expected #OFFSET, got " << tag << std::endl; + return false; + } + std::vector offset; + while (offset_stream >> dimVertices) + offset.push_back(dimVertices); + // Swap dimensions so that they are from fastest to slowest growing + // dims[0] -> col; dims[1] -> row, dims[2] ->slice + std::swap(offset[0], offset[1]); - // Read data - std::vector values(numVertices); - if (filename.compare(filename.length() - 5, 5, ".bdem") == 0) - { - inFile.read(reinterpret_cast(values.data()), - static_cast(numVertices * sizeof(ValueType))); - } - else - { - for (std::size_t vertex = 0; vertex < numVertices; ++vertex) - { - inFile >> values[vertex]; - } - } + getline(inFile, line); + std::istringstream bpd_stream(line); + bpd_stream >> tag; + if (tag != "#BLOCKS_PER_DIM") + { + std::cerr << "Error: Expected #BLOCKS_PER_DIM, got " << tag << std::endl; + return false; + } + std::vector bpd; + while (bpd_stream >> dimVertices) + bpd.push_back(dimVertices); + // Swap dimensions so that they are from fastest to slowest growing + // dims[0] -> col; dims[1] -> 
row, dims[2] ->slice + std::swap(bpd[0], bpd[1]); - currTime = totalTime.GetElapsedTime(); - dataReadTime = currTime - prevTime; - prevTime = currTime; + getline(inFile, line); + std::istringstream blockIndex_stream(line); + blockIndex_stream >> tag; + if (tag != "#BLOCK_INDEX") + { + std::cerr << "Error: Expected #BLOCK_INDEX, got " << tag << std::endl; + return false; + } + std::vector blockIndex; + while (blockIndex_stream >> dimVertices) + blockIndex.push_back(dimVertices); + // Swap dimensions so that they are from fastest to slowest growing + // dims[0] -> col; dims[1] -> row, dims[2] ->slice + std::swap(blockIndex[0], blockIndex[1]); - // Create vtk-m data set - vtkm::cont::DataSetBuilderUniform dsb; - vtkm::cont::DataSet ds; - if (nDims == 2) - { - const vtkm::Id2 v_dims{ - static_cast(dims[0]), - static_cast(dims[1]), - }; - const vtkm::Vec v_origin{ static_cast(offset[0]), - static_cast(offset[1]) }; - const vtkm::Vec v_spacing{ 1, 1 }; - ds = dsb.Create(v_dims, v_origin, v_spacing); - } - else - { - VTKM_ASSERT(nDims == 3); - const vtkm::Id3 v_dims{ static_cast(dims[0]), - static_cast(dims[1]), - static_cast(dims[2]) }; - const vtkm::Vec v_origin{ static_cast(offset[0]), - static_cast(offset[1]), - static_cast(offset[2]) }; - vtkm::Vec v_spacing(1, 1, 1); - ds = dsb.Create(v_dims, v_origin, v_spacing); - } + getline(inFile, line); + std::istringstream linestream(line); + std::vector dims; + while (linestream >> dimVertices) + { + dims.push_back(dimVertices); + } - ds.AddPointField("values", values); - // and add to partition - useDataSet.AppendPartition(ds); + if (dims.size() != global_extents.size() || dims.size() != offset.size()) + { + std::cerr << "Error: Dimension mismatch" << std::endl; + return false; + } + // Swap dimensions so that they are from fastest to slowest growing + // dims[0] -> col; dims[1] -> row, dims[2] ->slice + std::swap(dims[0], dims[1]); - localBlockIndicesPortal.Set(blockNo, - vtkm::Id3{ static_cast(blockIndex[0]), - 
static_cast(blockIndex[1]), - static_cast(nDims == 3 ? blockIndex[2] : 0) }); - localBlockOriginsPortal.Set(blockNo, - vtkm::Id3{ static_cast(offset[0]), - static_cast(offset[1]), - static_cast(nDims == 3 ? offset[2] : 0) }); - localBlockSizesPortal.Set(blockNo, - vtkm::Id3{ static_cast(dims[0]), - static_cast(dims[1]), - static_cast(nDims == 3 ? dims[2] : 0) }); + // Compute the number of vertices, i.e., xdim * ydim * zdim + nDims = static_cast(dims.size()); + std::size_t numVertices = static_cast(std::accumulate( + dims.begin(), dims.end(), std::size_t(1), std::multiplies())); - if (blockNo == 0) - { - blocksPerDim = vtkm::Id3{ static_cast(bpd[0]), - static_cast(bpd[1]), - static_cast(nDims == 3 ? bpd[2] : 1) }; - } - } - currTime = totalTime.GetElapsedTime(); - buildDatasetTime = currTime - prevTime; - return true; -} + // Check for fatal input errors + // Check that the number of dimensiosn is either 2D or 3D + bool invalidNumDimensions = (nDims < 2 || nDims > 3); + // Log any errors if found on rank 0 + VTKM_LOG_IF_S(vtkm::cont::LogLevel::Error, + invalidNumDimensions && (rank == 0), + "The input mesh is " << nDims + << "D. " + "The input data must be either 2D or 3D."); + // If we found any errors in the setttings than finalize MPI and exit the execution + if (invalidNumDimensions) + { + return false; + } -/// Read data from a single file and split the data into blocks across ranks -/// This is a simple implementation that will read the full data on all ranks -/// and then extract only the relevant subblock for that rank. 
-/// The function support reading from BOV as well from ASCII files -/// -/// @param[in] rank The current MPI rank the function is called from -/// @param[in] size The number of MPI ranks -/// @param[in] filename Name of the file with %d as placeholder for the integer index of the block -/// @param[in] numBlocks Number of blocks to use during computation -/// @param[in] blocksPerRank Number of data blocks to process on each rank -/// @param[out] nDims Number of data dimensions (i.e, 2 or 3) -/// @param[out] useDataSet VTKm partioned dataset to be used with the distributed contour tree filter -/// @param[out] globalSize Global extends of the input mesh (i.e., number of mesh points in each dimension) -/// @param[in] blocksPerDim Number of data blocks used in each data dimension -/// @param[in] localBlockIndices Array with the (x,y,z) index of each local data block with -/// with respect to blocksPerDim -/// @param[in] localBlockOrigins Array with the (x,y,z) origin (with regard to mesh index) of each -/// local data block -/// @param[in] localBlockSizes Array with the sizes (i.e., extends in number of mesh points) of each -/// local data block -/// @param[in] dataReadTime Time to read the data -/// @param[in] buildDatasetTime Time to construct the VTKm datasets -/// @returns bool indicating whether the read was successful or not -template -bool readSingleBlockFile(const int& rank, - const int& size, - const std::string& filename, - const int& numBlocks, - const int& blocksPerRank, - std::vector::size_type& nDims, - vtkm::cont::PartitionedDataSet& useDataSet, - vtkm::Id3& globalSize, - vtkm::Id3& blocksPerDim, - vtkm::cont::ArrayHandle& localBlockIndices, - vtkm::cont::ArrayHandle& localBlockOrigins, - vtkm::cont::ArrayHandle& localBlockSizes, - vtkm::Float64& dataReadTime, - vtkm::Float64& buildDatasetTime) -{ - vtkm::cont::Timer totalTime; - totalTime.Start(); - vtkm::Float64 prevTime = 0; - vtkm::Float64 currTime = 0; + // Read data + std::vector 
values(numVertices); + if (filename.compare(filename.length() - 5, 5, ".bdem") == 0) + { + inFile.read(reinterpret_cast(values.data()), + static_cast(numVertices * sizeof(ValueType))); + } + else + { + for (std::size_t vertex = 0; vertex < numVertices; ++vertex) + { + inFile >> values[vertex]; + } + } - localBlockIndices.Allocate(blocksPerRank); - localBlockOrigins.Allocate(blocksPerRank); - localBlockSizes.Allocate(blocksPerRank); - auto localBlockIndicesPortal = localBlockIndices.WritePortal(); - auto localBlockOriginsPortal = localBlockOrigins.WritePortal(); - auto localBlockSizesPortal = localBlockSizes.WritePortal(); + currTime = totalTime.GetElapsedTime(); + dataReadTime = currTime - prevTime; + prevTime = currTime; - vtkm::cont::DataSet inDataSet; - // TODO: Currently FloatDefault would be fine, but it could cause problems if we ever read binary files here. - std::vector values; - std::vector dims; + // Create vtk-m data set + vtkm::cont::DataSetBuilderUniform dsb; + vtkm::cont::DataSet ds; + if (nDims == 2) + { + const vtkm::Id2 v_dims{ + static_cast(dims[0]), + static_cast(dims[1]), + }; + const vtkm::Vec v_origin{ static_cast(offset[0]), + static_cast(offset[1]) }; + const vtkm::Vec v_spacing{ 1, 1 }; + ds = dsb.Create(v_dims, v_origin, v_spacing); + vtkm::cont::CellSetStructured<2> cs; + cs.SetPointDimensions(v_dims); + cs.SetGlobalPointDimensions(vtkm::Id2{ globalSize[0], globalSize[1] }); + cs.SetGlobalPointIndexStart(vtkm::Id2{ offset[0], offset[1] }); + ds.SetCellSet(cs); + } + else + { + VTKM_ASSERT(nDims == 3); + const vtkm::Id3 v_dims{ static_cast(dims[0]), + static_cast(dims[1]), + static_cast(dims[2]) }; + const vtkm::Vec v_origin{ static_cast(offset[0]), + static_cast(offset[1]), + static_cast(offset[2]) }; + vtkm::Vec v_spacing(1, 1, 1); + ds = dsb.Create(v_dims, v_origin, v_spacing); + vtkm::cont::CellSetStructured<3> cs; + cs.SetPointDimensions(v_dims); + cs.SetGlobalPointDimensions(globalSize); + cs.SetGlobalPointIndexStart(vtkm::Id3{ 
offset[0], offset[1], offset[2] }); + ds.SetCellSet(cs); + } - // Read BOV data file - if (filename.compare(filename.length() - 3, 3, "bov") == 0) - { - std::cout << "Reading BOV file" << std::endl; - vtkm::io::BOVDataSetReader reader(filename); - inDataSet = reader.ReadDataSet(); - nDims = 3; - currTime = totalTime.GetElapsedTime(); - dataReadTime = currTime - prevTime; - prevTime = currTime; - // Copy the data into the values array so we can construct a multiblock dataset - vtkm::Id3 pointDimensions; - auto cellSet = inDataSet.GetCellSet(); - vtkm::cont::CastAndCall( - cellSet, vtkm::worklet::contourtree_augmented::GetPointDimensions(), pointDimensions); - std::cout << "Point dimensions are " << pointDimensions << std::endl; - dims.resize(3); - dims[0] = pointDimensions[0]; - dims[1] = pointDimensions[1]; - dims[2] = pointDimensions[2]; - auto tempFieldData = inDataSet.GetField(0).GetData(); - values.resize(static_cast(tempFieldData.GetNumberOfValues())); - auto valuesHandle = vtkm::cont::make_ArrayHandle(values, vtkm::CopyFlag::Off); - vtkm::cont::ArrayCopy(tempFieldData, valuesHandle); - valuesHandle.SyncControlArray(); //Forces values to get updated if copy happened on GPU - } - // Read ASCII data input - else - { - std::cout << "Reading ASCII file" << std::endl; - std::ifstream inFile(filename); - if (inFile.bad()) - return 0; + ds.AddPointField("values", values); + // and add to partition + useDataSet.AppendPartition(ds); - // Read the dimensions of the mesh, i.e,. 
number of elementes in x, y, and z - std::string line; - getline(inFile, line); - std::istringstream linestream(line); - vtkm::Id dimVertices; - while (linestream >> dimVertices) - { - dims.push_back(dimVertices); - } - // Swap dimensions so that they are from fastest to slowest growing - // dims[0] -> col; dims[1] -> row, dims[2] ->slice - std::swap(dims[0], dims[1]); + localBlockIndicesPortal.Set( + blockNo, + vtkm::Id3{ static_cast(blockIndex[0]), + static_cast(blockIndex[1]), + static_cast(nDims == 3 ? blockIndex[2] : 0) }); - // Compute the number of vertices, i.e., xdim * ydim * zdim - nDims = static_cast(dims.size()); - std::size_t numVertices = static_cast( - std::accumulate(dims.begin(), dims.end(), std::size_t(1), std::multiplies())); - - // Check the the number of dimensiosn is either 2D or 3D - bool invalidNumDimensions = (nDims < 2 || nDims > 3); - // Log any errors if found on rank 0 - VTKM_LOG_IF_S(vtkm::cont::LogLevel::Error, - invalidNumDimensions && (rank == 0), - "The input mesh is " << nDims << "D. The input data must be either 2D or 3D."); - // If we found any errors in the setttings than finalize MPI and exit the execution - if (invalidNumDimensions) - { - return false; - } - - // Read data - values.resize(numVertices); - for (std::size_t vertex = 0; vertex < numVertices; ++vertex) - { - inFile >> values[vertex]; - } - - // finish reading the data - inFile.close(); - - currTime = totalTime.GetElapsedTime(); - dataReadTime = currTime - prevTime; - prevTime = currTime; - - } // END ASCII Read - - // Create a multi-block dataset for multi-block DIY-paralle processing - blocksPerDim = - nDims == 3 ? vtkm::Id3(1, 1, numBlocks) : vtkm::Id3(1, numBlocks, 1); // Decompose the data into - globalSize = nDims == 3 ? 
vtkm::Id3(static_cast(dims[0]), - static_cast(dims[1]), - static_cast(dims[2])) - : vtkm::Id3(static_cast(dims[0]), - static_cast(dims[1]), - static_cast(1)); - std::cout << blocksPerDim << " " << globalSize << std::endl; - { - vtkm::Id lastDimSize = - (nDims == 2) ? static_cast(dims[1]) : static_cast(dims[2]); - if (size > (lastDimSize / 2.)) - { - VTKM_LOG_IF_S(vtkm::cont::LogLevel::Error, - rank == 0, - "Number of ranks too large for data. Use " << lastDimSize / 2 - << "or fewer ranks"); - return false; - } - vtkm::Id standardBlockSize = (vtkm::Id)(lastDimSize / numBlocks); - vtkm::Id blockSize = standardBlockSize; - vtkm::Id blockSliceSize = - nDims == 2 ? static_cast(dims[0]) : static_cast((dims[0] * dims[1])); - vtkm::Id blockNumValues = blockSize * blockSliceSize; - - vtkm::Id startBlock = blocksPerRank * rank; - vtkm::Id endBlock = startBlock + blocksPerRank; - for (vtkm::Id blockIndex = startBlock; blockIndex < endBlock; ++blockIndex) - { - vtkm::Id localBlockIndex = blockIndex - startBlock; - vtkm::Id blockStart = blockIndex * blockNumValues; - vtkm::Id blockEnd = blockStart + blockNumValues; - if (blockIndex < (numBlocks - 1)) // add overlap between regions - { - blockEnd += blockSliceSize; - } - else - { - blockEnd = lastDimSize * blockSliceSize; - } - vtkm::Id currBlockSize = (vtkm::Id)((blockEnd - blockStart) / blockSliceSize); - - vtkm::cont::DataSetBuilderUniform dsb; - vtkm::cont::DataSet ds; - - // 2D data - if (nDims == 2) - { - vtkm::Id2 vdims; - vdims[0] = static_cast(dims[0]); - vdims[1] = static_cast(currBlockSize); - vtkm::Vec origin(0, blockIndex * blockSize); - vtkm::Vec spacing(1, 1); - ds = dsb.Create(vdims, origin, spacing); - - localBlockIndicesPortal.Set(localBlockIndex, vtkm::Id3(0, blockIndex, 0)); - localBlockOriginsPortal.Set(localBlockIndex, - vtkm::Id3(0, (blockStart / blockSliceSize), 0)); - localBlockSizesPortal.Set(localBlockIndex, - vtkm::Id3(static_cast(dims[0]), currBlockSize, 0)); - } - // 3D data - else - { - vtkm::Id3 
vdims; - vdims[0] = static_cast(dims[0]); - vdims[1] = static_cast(dims[1]); - vdims[2] = static_cast(currBlockSize); - vtkm::Vec origin(0, 0, (blockIndex * blockSize)); - vtkm::Vec spacing(1, 1, 1); - ds = dsb.Create(vdims, origin, spacing); - - localBlockIndicesPortal.Set(localBlockIndex, vtkm::Id3(0, 0, blockIndex)); - localBlockOriginsPortal.Set(localBlockIndex, - vtkm::Id3(0, 0, (blockStart / blockSliceSize))); - localBlockSizesPortal.Set( - localBlockIndex, - vtkm::Id3(static_cast(dims[0]), static_cast(dims[1]), currBlockSize)); + if (blockNo == 0) + { + blocksPerDim = vtkm::Id3{ static_cast(bpd[0]), + static_cast(bpd[1]), + static_cast(nDims == 3 ? bpd[2] : 1) }; + } + } + currTime = totalTime.GetElapsedTime(); + buildDatasetTime = currTime - prevTime; + return true; } - std::vector subValues((values.begin() + blockStart), - (values.begin() + blockEnd)); - ds.AddPointField("values", subValues); - useDataSet.AppendPartition(ds); - } - } - currTime = totalTime.GetElapsedTime(); - buildDatasetTime = currTime - prevTime; - return true; -} + /// Read data from a single file and split the data into blocks across ranks + /// This is a simple implementation that will read the full data on all ranks + /// and then extract only the relevant subblock for that rank. 
+ /// The function support reading from BOV as well from ASCII files + /// + /// @param[in] rank The current MPI rank the function is called from + /// @param[in] size The number of MPI ranks + /// @param[in] filename Name of the file with %d as placeholder for the integer index of the block + /// @param[in] numBlocks Number of blocks to use during computation + /// @param[in] blocksPerRank Number of data blocks to process on each rank + /// @param[out] nDims Number of data dimensions (i.e, 2 or 3) + /// @param[out] useDataSet VTKm partioned dataset to be used with the distributed contour tree filter + /// @param[out] globalSize Global extends of the input mesh (i.e., number of mesh points in each dimension) + /// @param[in] blocksPerDim Number of data blocks used in each data dimension + /// @param[in] localBlockIndices Array with the (x,y,z) index of each local data block with + /// with respect to blocksPerDim + /// @param[in] dataReadTime Time to read the data + /// @param[in] buildDatasetTime Time to construct the VTKm datasets + /// @returns bool indicating whether the read was successful or not + template + bool readSingleBlockFile(const int& rank, + const int& size, + const std::string& filename, + const int& numBlocks, + const int& blocksPerRank, + std::vector::size_type& nDims, + vtkm::cont::PartitionedDataSet& useDataSet, + vtkm::Id3& globalSize, + vtkm::Id3& blocksPerDim, + vtkm::cont::ArrayHandle& localBlockIndices, + vtkm::Float64& dataReadTime, + vtkm::Float64& buildDatasetTime) + { + vtkm::cont::Timer totalTime; + totalTime.Start(); + vtkm::Float64 prevTime = 0; + vtkm::Float64 currTime = 0; + + localBlockIndices.Allocate(blocksPerRank); + auto localBlockIndicesPortal = localBlockIndices.WritePortal(); + + vtkm::cont::DataSet inDataSet; + // TODO: Currently FloatDefault would be fine, but it could cause problems if we ever read binary files here. 
+ std::vector values; + std::vector dims; + + // Read BOV data file + if (filename.compare(filename.length() - 3, 3, "bov") == 0) + { + std::cout << "Reading BOV file" << std::endl; + vtkm::io::BOVDataSetReader reader(filename); + inDataSet = reader.ReadDataSet(); + nDims = 3; + currTime = totalTime.GetElapsedTime(); + dataReadTime = currTime - prevTime; + prevTime = currTime; + // Copy the data into the values array so we can construct a multiblock dataset + vtkm::Id3 pointDimensions; + auto cellSet = inDataSet.GetCellSet(); + vtkm::cont::CastAndCall( + cellSet, vtkm::worklet::contourtree_augmented::GetPointDimensions(), pointDimensions); + std::cout << "Point dimensions are " << pointDimensions << std::endl; + dims.resize(3); + dims[0] = pointDimensions[0]; + dims[1] = pointDimensions[1]; + dims[2] = pointDimensions[2]; + auto tempFieldData = inDataSet.GetField(0).GetData(); + values.resize(static_cast(tempFieldData.GetNumberOfValues())); + auto valuesHandle = vtkm::cont::make_ArrayHandle(values, vtkm::CopyFlag::Off); + vtkm::cont::ArrayCopy(tempFieldData, valuesHandle); + valuesHandle.SyncControlArray(); //Forces values to get updated if copy happened on GPU + } + // Read ASCII data input + else + { + std::cout << "Reading ASCII file" << std::endl; + std::ifstream inFile(filename); + if (inFile.bad()) + return 0; + + // Read the dimensions of the mesh, i.e,. 
number of elementes in x, y, and z + std::string line; + getline(inFile, line); + std::istringstream linestream(line); + vtkm::Id dimVertices; + while (linestream >> dimVertices) + { + dims.push_back(dimVertices); + } + // Swap dimensions so that they are from fastest to slowest growing + // dims[0] -> col; dims[1] -> row, dims[2] ->slice + std::swap(dims[0], dims[1]); + + // Compute the number of vertices, i.e., xdim * ydim * zdim + nDims = static_cast(dims.size()); + std::size_t numVertices = static_cast(std::accumulate( + dims.begin(), dims.end(), std::size_t(1), std::multiplies())); + + // Check the the number of dimensiosn is either 2D or 3D + bool invalidNumDimensions = (nDims < 2 || nDims > 3); + // Log any errors if found on rank 0 + VTKM_LOG_IF_S(vtkm::cont::LogLevel::Error, + invalidNumDimensions && (rank == 0), + "The input mesh is " << nDims + << "D. The input data must be either 2D or 3D."); + // If we found any errors in the setttings than finalize MPI and exit the execution + if (invalidNumDimensions) + { + return false; + } + + // Read data + values.resize(numVertices); + for (std::size_t vertex = 0; vertex < numVertices; ++vertex) + { + inFile >> values[vertex]; + } + + // finish reading the data + inFile.close(); + + currTime = totalTime.GetElapsedTime(); + dataReadTime = currTime - prevTime; + prevTime = currTime; + + } // END ASCII Read + + // Create a multi-block dataset for multi-block DIY-paralle processing + blocksPerDim = nDims == 3 ? vtkm::Id3(1, 1, numBlocks) + : vtkm::Id3(1, numBlocks, 1); // Decompose the data into + globalSize = nDims == 3 ? vtkm::Id3(static_cast(dims[0]), + static_cast(dims[1]), + static_cast(dims[2])) + : vtkm::Id3(static_cast(dims[0]), + static_cast(dims[1]), + static_cast(1)); + std::cout << blocksPerDim << " " << globalSize << std::endl; + { + vtkm::Id lastDimSize = + (nDims == 2) ? 
static_cast(dims[1]) : static_cast(dims[2]); + if (size > (lastDimSize / 2.)) + { + VTKM_LOG_IF_S(vtkm::cont::LogLevel::Error, + rank == 0, + "Number of ranks too large for data. Use " << lastDimSize / 2 + << "or fewer ranks"); + return false; + } + vtkm::Id standardBlockSize = (vtkm::Id)(lastDimSize / numBlocks); + vtkm::Id blockSize = standardBlockSize; + vtkm::Id blockSliceSize = nDims == 2 ? static_cast(dims[0]) + : static_cast((dims[0] * dims[1])); + vtkm::Id blockNumValues = blockSize * blockSliceSize; + + vtkm::Id startBlock = blocksPerRank * rank; + vtkm::Id endBlock = startBlock + blocksPerRank; + for (vtkm::Id blockIndex = startBlock; blockIndex < endBlock; ++blockIndex) + { + vtkm::Id localBlockIndex = blockIndex - startBlock; + vtkm::Id blockStart = blockIndex * blockNumValues; + vtkm::Id blockEnd = blockStart + blockNumValues; + if (blockIndex < (numBlocks - 1)) // add overlap between regions + { + blockEnd += blockSliceSize; + } + else + { + blockEnd = lastDimSize * blockSliceSize; + } + vtkm::Id currBlockSize = (vtkm::Id)((blockEnd - blockStart) / blockSliceSize); + + vtkm::cont::DataSetBuilderUniform dsb; + vtkm::cont::DataSet ds; + + // 2D data + if (nDims == 2) + { + vtkm::Id2 vdims; + vdims[0] = static_cast(dims[0]); + vdims[1] = static_cast(currBlockSize); + vtkm::Vec origin(0, blockIndex * blockSize); + vtkm::Vec spacing(1, 1); + ds = dsb.Create(vdims, origin, spacing); + vtkm::cont::CellSetStructured<2> cs; + cs.SetPointDimensions(vdims); + cs.SetGlobalPointDimensions(vtkm::Id2{ globalSize[0], globalSize[1] }); + cs.SetGlobalPointIndexStart(vtkm::Id2{ 0, (blockStart / blockSliceSize) }); + ds.SetCellSet(cs); + localBlockIndicesPortal.Set(localBlockIndex, vtkm::Id3(0, blockIndex, 0)); + } + // 3D data + else + { + vtkm::Id3 vdims; + vdims[0] = static_cast(dims[0]); + vdims[1] = static_cast(dims[1]); + vdims[2] = static_cast(currBlockSize); + vtkm::Vec origin(0, 0, (blockIndex * blockSize)); + vtkm::Vec spacing(1, 1, 1); + ds = dsb.Create(vdims, 
origin, spacing); + vtkm::cont::CellSetStructured<3> cs; + cs.SetPointDimensions(vdims); + cs.SetGlobalPointDimensions(globalSize); + cs.SetGlobalPointIndexStart(vtkm::Id3(0, 0, blockStart / blockSliceSize)); + ds.SetCellSet(cs); + localBlockIndicesPortal.Set(localBlockIndex, vtkm::Id3(0, 0, blockIndex)); + } + + std::vector subValues((values.begin() + blockStart), + (values.begin() + blockEnd)); + + ds.AddPointField("values", subValues); + useDataSet.AppendPartition(ds); + } + } + currTime = totalTime.GetElapsedTime(); + buildDatasetTime = currTime - prevTime; + return true; + } #endif From fbc313186f7e5e68e101e627d453f11656086d48 Mon Sep 17 00:00:00 2001 From: oruebel Date: Tue, 20 Sep 2022 01:42:15 -0700 Subject: [PATCH 13/29] Fix error in ContourTreeAppDataIO --- .../ContourTreeAppDataIO.h | 951 +++++++++--------- 1 file changed, 471 insertions(+), 480 deletions(-) diff --git a/examples/contour_tree_distributed/ContourTreeAppDataIO.h b/examples/contour_tree_distributed/ContourTreeAppDataIO.h index eb31ced53..badbe61d1 100644 --- a/examples/contour_tree_distributed/ContourTreeAppDataIO.h +++ b/examples/contour_tree_distributed/ContourTreeAppDataIO.h @@ -348,516 +348,507 @@ bool read3DHDF5File(const int& mpi_rank, vtkm::cont::CellSetStructured<3> cs; cs.SetPointDimensions(v_dims); cs.SetGlobalPointDimensions(globalSize); - cs.SetGlobalPointIndexStart(vtkm::Id3{ - v_origin + cs.SetGlobalPointIndexStart(vtkm::Id3{ v_origin[0], v_origin[1], v_origin[2] }); + ds.SetCellSet(cs); + + ds.AddPointField("values", values); + // and add to partition + useDataSet.AppendPartition(ds); + + // Swap order to match pre-splot + localBlockIndicesPortal.Set(blockNo, vtkm::Id3(blockIndex[1], blockIndex[0], blockIndex[2])); + + // Log information of the (first) local data block + VTKM_LOG_S(vtkm::cont::LogLevel::Info, + "" << std::setw(42) << std::left << "blockSize" + << ":" << v_dims << std::endl + << std::setw(42) << std::left << "blockOrigin=" << v_origin << std::endl + << 
std::setw(42) << std::left << "blockIndices=" << localBlockIndicesPortal.Get(0) + << std::endl + << std::setw(42) << std::left << "globalSize=" << globalSize << std::endl); + + // Finished data read + currTime = totalTime.GetElapsedTime(); + dataReadTime = currTime - prevTime; + prevTime = currTime; + + currTime = totalTime.GetElapsedTime(); + buildDatasetTime = currTime - prevTime; + return true; +} +#endif + + + +/// Read data from pre-split ASCII files +/// @param[in] rank The current MPI rank the function is called from +/// @param[in] filename Name of the file with %d as placeholder for the integer index of the block +/// @param[in] blocksPerRank Number of data blocks to process on each rank +/// @param[out] nDims Number of data dimensions (i.e, 2 or 3) +/// @param[out] useDataSet VTKm partioned dataset to be used with the distributed contour tree filter +/// @param[out] globalSize Global extends of the input mesh (i.e., number of mesh points in each dimension) +/// @param[in] blocksPerDim Number of data blocks used in each data dimension +/// @param[in] localBlockIndices Array with the (x,y,z) index of each local data block with +/// with respect to blocksPerDim +/// @param[in] dataReadTime Time to read the data +/// @param[in] buildDatasetTime Time to construct the VTKm datasets +/// @returns bool indicating whether the read was successful or not +template +bool readPreSplitFiles(const int& rank, + const std::string& filename, + const int& blocksPerRank, + std::vector::size_type& nDims, + vtkm::cont::PartitionedDataSet& useDataSet, + vtkm::Id3& globalSize, + vtkm::Id3& blocksPerDim, + vtkm::cont::ArrayHandle& localBlockIndices, + vtkm::Float64& dataReadTime, + vtkm::Float64& buildDatasetTime) +{ + vtkm::cont::Timer totalTime; + totalTime.Start(); + vtkm::Float64 prevTime = 0; + vtkm::Float64 currTime = 0; + + localBlockIndices.Allocate(blocksPerRank); + auto localBlockIndicesPortal = localBlockIndices.WritePortal(); + for (int blockNo = 0; blockNo < 
blocksPerRank; ++blockNo) + { + // Translate pattern into filename for this block + char block_filename[256]; + snprintf(block_filename, + sizeof(block_filename), + filename.c_str(), + static_cast(rank * blocksPerRank + blockNo)); + std::cout << "Reading file " << block_filename << std::endl; + + // Open file + std::ifstream inFile(block_filename); + if (!inFile.is_open() || inFile.bad()) { - [0], - v_origin {[1], v_origin{ - [2] }); - ds.SetCellSet(cs); + std::cerr << "Error: Couldn't open file " << block_filename << std::endl; + return false; + } - ds.AddPointField("values", values); - // and add to partition - useDataSet.AppendPartition(ds); + // Read header with dimensions + std::string line; + std::string tag; + vtkm::Id dimVertices; - // Swap order to match pre-splot - localBlockIndicesPortal.Set(blockNo, - vtkm::Id3(blockIndex[1], blockIndex[0], blockIndex[2])); + getline(inFile, line); + std::istringstream global_extents_stream(line); + global_extents_stream >> tag; + if (tag != "#GLOBAL_EXTENTS") + { + std::cerr << "Error: Expected #GLOBAL_EXTENTS, got " << tag << std::endl; + return false; + } - // Log information of the (first) local data block - VTKM_LOG_S(vtkm::cont::LogLevel::Info, - "" << std::setw(42) << std::left << "blockSize" - << ":" << v_dims << std::endl - << std::setw(42) << std::left << "blockOrigin=" << v_origin << std::endl - << std::setw(42) << std::left - << "blockIndices=" << localBlockIndicesPortal.Get(0) << std::endl - << std::setw(42) << std::left << "globalSize=" << globalSize << std::endl); + std::vector global_extents; + while (global_extents_stream >> dimVertices) + global_extents.push_back(dimVertices); - // Finished data read - currTime = totalTime.GetElapsedTime(); - dataReadTime = currTime - prevTime; - prevTime = currTime; + // Swap dimensions so that they are from fastest to slowest growing + // dims[0] -> col; dims[1] -> row, dims[2] ->slice + std::swap(global_extents[0], global_extents[1]); - currTime = 
totalTime.GetElapsedTime(); - buildDatasetTime = currTime - prevTime; - return true; - } -#endif - - - - /// Read data from pre-split ASCII files - /// @param[in] rank The current MPI rank the function is called from - /// @param[in] filename Name of the file with %d as placeholder for the integer index of the block - /// @param[in] blocksPerRank Number of data blocks to process on each rank - /// @param[out] nDims Number of data dimensions (i.e, 2 or 3) - /// @param[out] useDataSet VTKm partioned dataset to be used with the distributed contour tree filter - /// @param[out] globalSize Global extends of the input mesh (i.e., number of mesh points in each dimension) - /// @param[in] blocksPerDim Number of data blocks used in each data dimension - /// @param[in] localBlockIndices Array with the (x,y,z) index of each local data block with - /// with respect to blocksPerDim - /// @param[in] dataReadTime Time to read the data - /// @param[in] buildDatasetTime Time to construct the VTKm datasets - /// @returns bool indicating whether the read was successful or not - template - bool readPreSplitFiles(const int& rank, - const std::string& filename, - const int& blocksPerRank, - std::vector::size_type& nDims, - vtkm::cont::PartitionedDataSet& useDataSet, - vtkm::Id3& globalSize, - vtkm::Id3& blocksPerDim, - vtkm::cont::ArrayHandle& localBlockIndices, - vtkm::Float64& dataReadTime, - vtkm::Float64& buildDatasetTime) + if (blockNo == 0) + { // First block: Set globalSize + globalSize = + vtkm::Id3{ static_cast(global_extents[0]), + static_cast(global_extents[1]), + static_cast(global_extents.size() > 2 ? global_extents[2] : 1) }; + } + else + { // All other blocks: Consistency check of globalSize + if (globalSize != + vtkm::Id3{ static_cast(global_extents[0]), + static_cast(global_extents[1]), + static_cast(global_extents.size() > 2 ? 
global_extents[2] : 1) }) { - vtkm::cont::Timer totalTime; - totalTime.Start(); - vtkm::Float64 prevTime = 0; - vtkm::Float64 currTime = 0; + std::cerr << "Error: Global extents mismatch between blocks!" << std::endl; + return false; + } + } - localBlockIndices.Allocate(blocksPerRank); - auto localBlockIndicesPortal = localBlockIndices.WritePortal(); - for (int blockNo = 0; blockNo < blocksPerRank; ++blockNo) - { - // Translate pattern into filename for this block - char block_filename[256]; - snprintf(block_filename, - sizeof(block_filename), - filename.c_str(), - static_cast(rank * blocksPerRank + blockNo)); - std::cout << "Reading file " << block_filename << std::endl; + getline(inFile, line); + std::istringstream offset_stream(line); + offset_stream >> tag; + if (tag != "#OFFSET") + { + std::cerr << "Error: Expected #OFFSET, got " << tag << std::endl; + return false; + } + std::vector offset; + while (offset_stream >> dimVertices) + offset.push_back(dimVertices); + // Swap dimensions so that they are from fastest to slowest growing + // dims[0] -> col; dims[1] -> row, dims[2] ->slice + std::swap(offset[0], offset[1]); - // Open file - std::ifstream inFile(block_filename); - if (!inFile.is_open() || inFile.bad()) - { - std::cerr << "Error: Couldn't open file " << block_filename << std::endl; - return false; - } + getline(inFile, line); + std::istringstream bpd_stream(line); + bpd_stream >> tag; + if (tag != "#BLOCKS_PER_DIM") + { + std::cerr << "Error: Expected #BLOCKS_PER_DIM, got " << tag << std::endl; + return false; + } + std::vector bpd; + while (bpd_stream >> dimVertices) + bpd.push_back(dimVertices); + // Swap dimensions so that they are from fastest to slowest growing + // dims[0] -> col; dims[1] -> row, dims[2] ->slice + std::swap(bpd[0], bpd[1]); - // Read header with dimensions - std::string line; - std::string tag; - vtkm::Id dimVertices; + getline(inFile, line); + std::istringstream blockIndex_stream(line); + blockIndex_stream >> tag; + if (tag != 
"#BLOCK_INDEX") + { + std::cerr << "Error: Expected #BLOCK_INDEX, got " << tag << std::endl; + return false; + } + std::vector blockIndex; + while (blockIndex_stream >> dimVertices) + blockIndex.push_back(dimVertices); + // Swap dimensions so that they are from fastest to slowest growing + // dims[0] -> col; dims[1] -> row, dims[2] ->slice + std::swap(blockIndex[0], blockIndex[1]); - getline(inFile, line); - std::istringstream global_extents_stream(line); - global_extents_stream >> tag; - if (tag != "#GLOBAL_EXTENTS") - { - std::cerr << "Error: Expected #GLOBAL_EXTENTS, got " << tag << std::endl; - return false; - } + getline(inFile, line); + std::istringstream linestream(line); + std::vector dims; + while (linestream >> dimVertices) + { + dims.push_back(dimVertices); + } - std::vector global_extents; - while (global_extents_stream >> dimVertices) - global_extents.push_back(dimVertices); + if (dims.size() != global_extents.size() || dims.size() != offset.size()) + { + std::cerr << "Error: Dimension mismatch" << std::endl; + return false; + } + // Swap dimensions so that they are from fastest to slowest growing + // dims[0] -> col; dims[1] -> row, dims[2] ->slice + std::swap(dims[0], dims[1]); - // Swap dimensions so that they are from fastest to slowest growing - // dims[0] -> col; dims[1] -> row, dims[2] ->slice - std::swap(global_extents[0], global_extents[1]); + // Compute the number of vertices, i.e., xdim * ydim * zdim + nDims = static_cast(dims.size()); + std::size_t numVertices = static_cast( + std::accumulate(dims.begin(), dims.end(), std::size_t(1), std::multiplies())); - if (blockNo == 0) - { // First block: Set globalSize - globalSize = - vtkm::Id3{ static_cast(global_extents[0]), - static_cast(global_extents[1]), - static_cast(global_extents.size() > 2 ? 
global_extents[2] : 1) }; - } - else - { // All other blocks: Consistency check of globalSize - if (globalSize != - vtkm::Id3{ - static_cast(global_extents[0]), - static_cast(global_extents[1]), - static_cast(global_extents.size() > 2 ? global_extents[2] : 1) }) - { - std::cerr << "Error: Global extents mismatch between blocks!" << std::endl; - return false; - } - } + // Check for fatal input errors + // Check that the number of dimensiosn is either 2D or 3D + bool invalidNumDimensions = (nDims < 2 || nDims > 3); + // Log any errors if found on rank 0 + VTKM_LOG_IF_S(vtkm::cont::LogLevel::Error, + invalidNumDimensions && (rank == 0), + "The input mesh is " << nDims + << "D. " + "The input data must be either 2D or 3D."); - getline(inFile, line); - std::istringstream offset_stream(line); - offset_stream >> tag; - if (tag != "#OFFSET") - { - std::cerr << "Error: Expected #OFFSET, got " << tag << std::endl; - return false; - } - std::vector offset; - while (offset_stream >> dimVertices) - offset.push_back(dimVertices); - // Swap dimensions so that they are from fastest to slowest growing - // dims[0] -> col; dims[1] -> row, dims[2] ->slice - std::swap(offset[0], offset[1]); + // If we found any errors in the setttings than finalize MPI and exit the execution + if (invalidNumDimensions) + { + return false; + } - getline(inFile, line); - std::istringstream bpd_stream(line); - bpd_stream >> tag; - if (tag != "#BLOCKS_PER_DIM") - { - std::cerr << "Error: Expected #BLOCKS_PER_DIM, got " << tag << std::endl; - return false; - } - std::vector bpd; - while (bpd_stream >> dimVertices) - bpd.push_back(dimVertices); - // Swap dimensions so that they are from fastest to slowest growing - // dims[0] -> col; dims[1] -> row, dims[2] ->slice - std::swap(bpd[0], bpd[1]); + // Read data + std::vector values(numVertices); + if (filename.compare(filename.length() - 5, 5, ".bdem") == 0) + { + inFile.read(reinterpret_cast(values.data()), + static_cast(numVertices * sizeof(ValueType))); + } 
+ else + { + for (std::size_t vertex = 0; vertex < numVertices; ++vertex) + { + inFile >> values[vertex]; + } + } - getline(inFile, line); - std::istringstream blockIndex_stream(line); - blockIndex_stream >> tag; - if (tag != "#BLOCK_INDEX") - { - std::cerr << "Error: Expected #BLOCK_INDEX, got " << tag << std::endl; - return false; - } - std::vector blockIndex; - while (blockIndex_stream >> dimVertices) - blockIndex.push_back(dimVertices); - // Swap dimensions so that they are from fastest to slowest growing - // dims[0] -> col; dims[1] -> row, dims[2] ->slice - std::swap(blockIndex[0], blockIndex[1]); + currTime = totalTime.GetElapsedTime(); + dataReadTime = currTime - prevTime; + prevTime = currTime; - getline(inFile, line); - std::istringstream linestream(line); - std::vector dims; - while (linestream >> dimVertices) - { - dims.push_back(dimVertices); - } + // Create vtk-m data set + vtkm::cont::DataSetBuilderUniform dsb; + vtkm::cont::DataSet ds; + if (nDims == 2) + { + const vtkm::Id2 v_dims{ + static_cast(dims[0]), + static_cast(dims[1]), + }; + const vtkm::Vec v_origin{ static_cast(offset[0]), + static_cast(offset[1]) }; + const vtkm::Vec v_spacing{ 1, 1 }; + ds = dsb.Create(v_dims, v_origin, v_spacing); + vtkm::cont::CellSetStructured<2> cs; + cs.SetPointDimensions(v_dims); + cs.SetGlobalPointDimensions(vtkm::Id2{ globalSize[0], globalSize[1] }); + cs.SetGlobalPointIndexStart(vtkm::Id2{ offset[0], offset[1] }); + ds.SetCellSet(cs); + } + else + { + VTKM_ASSERT(nDims == 3); + const vtkm::Id3 v_dims{ static_cast(dims[0]), + static_cast(dims[1]), + static_cast(dims[2]) }; + const vtkm::Vec v_origin{ static_cast(offset[0]), + static_cast(offset[1]), + static_cast(offset[2]) }; + vtkm::Vec v_spacing(1, 1, 1); + ds = dsb.Create(v_dims, v_origin, v_spacing); + vtkm::cont::CellSetStructured<3> cs; + cs.SetPointDimensions(v_dims); + cs.SetGlobalPointDimensions(globalSize); + cs.SetGlobalPointIndexStart(vtkm::Id3{ offset[0], offset[1], offset[2] }); + 
ds.SetCellSet(cs); + } - if (dims.size() != global_extents.size() || dims.size() != offset.size()) - { - std::cerr << "Error: Dimension mismatch" << std::endl; - return false; - } - // Swap dimensions so that they are from fastest to slowest growing - // dims[0] -> col; dims[1] -> row, dims[2] ->slice - std::swap(dims[0], dims[1]); + ds.AddPointField("values", values); + // and add to partition + useDataSet.AppendPartition(ds); - // Compute the number of vertices, i.e., xdim * ydim * zdim - nDims = static_cast(dims.size()); - std::size_t numVertices = static_cast(std::accumulate( - dims.begin(), dims.end(), std::size_t(1), std::multiplies())); + localBlockIndicesPortal.Set(blockNo, + vtkm::Id3{ static_cast(blockIndex[0]), + static_cast(blockIndex[1]), + static_cast(nDims == 3 ? blockIndex[2] : 0) }); - // Check for fatal input errors - // Check that the number of dimensiosn is either 2D or 3D - bool invalidNumDimensions = (nDims < 2 || nDims > 3); - // Log any errors if found on rank 0 - VTKM_LOG_IF_S(vtkm::cont::LogLevel::Error, - invalidNumDimensions && (rank == 0), - "The input mesh is " << nDims - << "D. " - "The input data must be either 2D or 3D."); + if (blockNo == 0) + { + blocksPerDim = vtkm::Id3{ static_cast(bpd[0]), + static_cast(bpd[1]), + static_cast(nDims == 3 ? 
bpd[2] : 1) }; + } + } + currTime = totalTime.GetElapsedTime(); + buildDatasetTime = currTime - prevTime; + return true; +} - // If we found any errors in the setttings than finalize MPI and exit the execution - if (invalidNumDimensions) - { - return false; - } - // Read data - std::vector values(numVertices); - if (filename.compare(filename.length() - 5, 5, ".bdem") == 0) - { - inFile.read(reinterpret_cast(values.data()), - static_cast(numVertices * sizeof(ValueType))); - } - else - { - for (std::size_t vertex = 0; vertex < numVertices; ++vertex) - { - inFile >> values[vertex]; - } - } +/// Read data from a single file and split the data into blocks across ranks +/// This is a simple implementation that will read the full data on all ranks +/// and then extract only the relevant subblock for that rank. +/// The function support reading from BOV as well from ASCII files +/// +/// @param[in] rank The current MPI rank the function is called from +/// @param[in] size The number of MPI ranks +/// @param[in] filename Name of the file with %d as placeholder for the integer index of the block +/// @param[in] numBlocks Number of blocks to use during computation +/// @param[in] blocksPerRank Number of data blocks to process on each rank +/// @param[out] nDims Number of data dimensions (i.e, 2 or 3) +/// @param[out] useDataSet VTKm partioned dataset to be used with the distributed contour tree filter +/// @param[out] globalSize Global extends of the input mesh (i.e., number of mesh points in each dimension) +/// @param[in] blocksPerDim Number of data blocks used in each data dimension +/// @param[in] localBlockIndices Array with the (x,y,z) index of each local data block with +/// with respect to blocksPerDim +/// @param[in] dataReadTime Time to read the data +/// @param[in] buildDatasetTime Time to construct the VTKm datasets +/// @returns bool indicating whether the read was successful or not +template +bool readSingleBlockFile(const int& rank, + const int& size, + const 
std::string& filename, + const int& numBlocks, + const int& blocksPerRank, + std::vector::size_type& nDims, + vtkm::cont::PartitionedDataSet& useDataSet, + vtkm::Id3& globalSize, + vtkm::Id3& blocksPerDim, + vtkm::cont::ArrayHandle& localBlockIndices, + vtkm::Float64& dataReadTime, + vtkm::Float64& buildDatasetTime) +{ + vtkm::cont::Timer totalTime; + totalTime.Start(); + vtkm::Float64 prevTime = 0; + vtkm::Float64 currTime = 0; - currTime = totalTime.GetElapsedTime(); - dataReadTime = currTime - prevTime; - prevTime = currTime; + localBlockIndices.Allocate(blocksPerRank); + auto localBlockIndicesPortal = localBlockIndices.WritePortal(); - // Create vtk-m data set - vtkm::cont::DataSetBuilderUniform dsb; - vtkm::cont::DataSet ds; - if (nDims == 2) - { - const vtkm::Id2 v_dims{ - static_cast(dims[0]), - static_cast(dims[1]), - }; - const vtkm::Vec v_origin{ static_cast(offset[0]), - static_cast(offset[1]) }; - const vtkm::Vec v_spacing{ 1, 1 }; - ds = dsb.Create(v_dims, v_origin, v_spacing); - vtkm::cont::CellSetStructured<2> cs; - cs.SetPointDimensions(v_dims); - cs.SetGlobalPointDimensions(vtkm::Id2{ globalSize[0], globalSize[1] }); - cs.SetGlobalPointIndexStart(vtkm::Id2{ offset[0], offset[1] }); - ds.SetCellSet(cs); - } - else - { - VTKM_ASSERT(nDims == 3); - const vtkm::Id3 v_dims{ static_cast(dims[0]), - static_cast(dims[1]), - static_cast(dims[2]) }; - const vtkm::Vec v_origin{ static_cast(offset[0]), - static_cast(offset[1]), - static_cast(offset[2]) }; - vtkm::Vec v_spacing(1, 1, 1); - ds = dsb.Create(v_dims, v_origin, v_spacing); - vtkm::cont::CellSetStructured<3> cs; - cs.SetPointDimensions(v_dims); - cs.SetGlobalPointDimensions(globalSize); - cs.SetGlobalPointIndexStart(vtkm::Id3{ offset[0], offset[1], offset[2] }); - ds.SetCellSet(cs); - } + vtkm::cont::DataSet inDataSet; + // TODO: Currently FloatDefault would be fine, but it could cause problems if we ever read binary files here. 
+ std::vector values; + std::vector dims; - ds.AddPointField("values", values); - // and add to partition - useDataSet.AppendPartition(ds); + // Read BOV data file + if (filename.compare(filename.length() - 3, 3, "bov") == 0) + { + std::cout << "Reading BOV file" << std::endl; + vtkm::io::BOVDataSetReader reader(filename); + inDataSet = reader.ReadDataSet(); + nDims = 3; + currTime = totalTime.GetElapsedTime(); + dataReadTime = currTime - prevTime; + prevTime = currTime; + // Copy the data into the values array so we can construct a multiblock dataset + vtkm::Id3 pointDimensions; + auto cellSet = inDataSet.GetCellSet(); + vtkm::cont::CastAndCall( + cellSet, vtkm::worklet::contourtree_augmented::GetPointDimensions(), pointDimensions); + std::cout << "Point dimensions are " << pointDimensions << std::endl; + dims.resize(3); + dims[0] = pointDimensions[0]; + dims[1] = pointDimensions[1]; + dims[2] = pointDimensions[2]; + auto tempFieldData = inDataSet.GetField(0).GetData(); + values.resize(static_cast(tempFieldData.GetNumberOfValues())); + auto valuesHandle = vtkm::cont::make_ArrayHandle(values, vtkm::CopyFlag::Off); + vtkm::cont::ArrayCopy(tempFieldData, valuesHandle); + valuesHandle.SyncControlArray(); //Forces values to get updated if copy happened on GPU + } + // Read ASCII data input + else + { + std::cout << "Reading ASCII file" << std::endl; + std::ifstream inFile(filename); + if (inFile.bad()) + return 0; - localBlockIndicesPortal.Set( - blockNo, - vtkm::Id3{ static_cast(blockIndex[0]), - static_cast(blockIndex[1]), - static_cast(nDims == 3 ? blockIndex[2] : 0) }); + // Read the dimensions of the mesh, i.e,. 
number of elementes in x, y, and z + std::string line; + getline(inFile, line); + std::istringstream linestream(line); + vtkm::Id dimVertices; + while (linestream >> dimVertices) + { + dims.push_back(dimVertices); + } + // Swap dimensions so that they are from fastest to slowest growing + // dims[0] -> col; dims[1] -> row, dims[2] ->slice + std::swap(dims[0], dims[1]); - if (blockNo == 0) - { - blocksPerDim = vtkm::Id3{ static_cast(bpd[0]), - static_cast(bpd[1]), - static_cast(nDims == 3 ? bpd[2] : 1) }; - } - } - currTime = totalTime.GetElapsedTime(); - buildDatasetTime = currTime - prevTime; - return true; + // Compute the number of vertices, i.e., xdim * ydim * zdim + nDims = static_cast(dims.size()); + std::size_t numVertices = static_cast( + std::accumulate(dims.begin(), dims.end(), std::size_t(1), std::multiplies())); + + // Check the the number of dimensiosn is either 2D or 3D + bool invalidNumDimensions = (nDims < 2 || nDims > 3); + // Log any errors if found on rank 0 + VTKM_LOG_IF_S(vtkm::cont::LogLevel::Error, + invalidNumDimensions && (rank == 0), + "The input mesh is " << nDims << "D. The input data must be either 2D or 3D."); + // If we found any errors in the setttings than finalize MPI and exit the execution + if (invalidNumDimensions) + { + return false; + } + + // Read data + values.resize(numVertices); + for (std::size_t vertex = 0; vertex < numVertices; ++vertex) + { + inFile >> values[vertex]; + } + + // finish reading the data + inFile.close(); + + currTime = totalTime.GetElapsedTime(); + dataReadTime = currTime - prevTime; + prevTime = currTime; + + } // END ASCII Read + + // Create a multi-block dataset for multi-block DIY-paralle processing + blocksPerDim = + nDims == 3 ? vtkm::Id3(1, 1, numBlocks) : vtkm::Id3(1, numBlocks, 1); // Decompose the data into + globalSize = nDims == 3 ? 
vtkm::Id3(static_cast(dims[0]), + static_cast(dims[1]), + static_cast(dims[2])) + : vtkm::Id3(static_cast(dims[0]), + static_cast(dims[1]), + static_cast(1)); + std::cout << blocksPerDim << " " << globalSize << std::endl; + { + vtkm::Id lastDimSize = + (nDims == 2) ? static_cast(dims[1]) : static_cast(dims[2]); + if (size > (lastDimSize / 2.)) + { + VTKM_LOG_IF_S(vtkm::cont::LogLevel::Error, + rank == 0, + "Number of ranks too large for data. Use " << lastDimSize / 2 + << "or fewer ranks"); + return false; + } + vtkm::Id standardBlockSize = (vtkm::Id)(lastDimSize / numBlocks); + vtkm::Id blockSize = standardBlockSize; + vtkm::Id blockSliceSize = + nDims == 2 ? static_cast(dims[0]) : static_cast((dims[0] * dims[1])); + vtkm::Id blockNumValues = blockSize * blockSliceSize; + + vtkm::Id startBlock = blocksPerRank * rank; + vtkm::Id endBlock = startBlock + blocksPerRank; + for (vtkm::Id blockIndex = startBlock; blockIndex < endBlock; ++blockIndex) + { + vtkm::Id localBlockIndex = blockIndex - startBlock; + vtkm::Id blockStart = blockIndex * blockNumValues; + vtkm::Id blockEnd = blockStart + blockNumValues; + if (blockIndex < (numBlocks - 1)) // add overlap between regions + { + blockEnd += blockSliceSize; + } + else + { + blockEnd = lastDimSize * blockSliceSize; + } + vtkm::Id currBlockSize = (vtkm::Id)((blockEnd - blockStart) / blockSliceSize); + + vtkm::cont::DataSetBuilderUniform dsb; + vtkm::cont::DataSet ds; + + // 2D data + if (nDims == 2) + { + vtkm::Id2 vdims; + vdims[0] = static_cast(dims[0]); + vdims[1] = static_cast(currBlockSize); + vtkm::Vec origin(0, blockIndex * blockSize); + vtkm::Vec spacing(1, 1); + ds = dsb.Create(vdims, origin, spacing); + vtkm::cont::CellSetStructured<2> cs; + cs.SetPointDimensions(vdims); + cs.SetGlobalPointDimensions(vtkm::Id2{ globalSize[0], globalSize[1] }); + cs.SetGlobalPointIndexStart(vtkm::Id2{ 0, (blockStart / blockSliceSize) }); + ds.SetCellSet(cs); + localBlockIndicesPortal.Set(localBlockIndex, vtkm::Id3(0, blockIndex, 
0)); + } + // 3D data + else + { + vtkm::Id3 vdims; + vdims[0] = static_cast(dims[0]); + vdims[1] = static_cast(dims[1]); + vdims[2] = static_cast(currBlockSize); + vtkm::Vec origin(0, 0, (blockIndex * blockSize)); + vtkm::Vec spacing(1, 1, 1); + ds = dsb.Create(vdims, origin, spacing); + vtkm::cont::CellSetStructured<3> cs; + cs.SetPointDimensions(vdims); + cs.SetGlobalPointDimensions(globalSize); + cs.SetGlobalPointIndexStart(vtkm::Id3(0, 0, blockStart / blockSliceSize)); + ds.SetCellSet(cs); + localBlockIndicesPortal.Set(localBlockIndex, vtkm::Id3(0, 0, blockIndex)); } + std::vector subValues((values.begin() + blockStart), + (values.begin() + blockEnd)); - /// Read data from a single file and split the data into blocks across ranks - /// This is a simple implementation that will read the full data on all ranks - /// and then extract only the relevant subblock for that rank. - /// The function support reading from BOV as well from ASCII files - /// - /// @param[in] rank The current MPI rank the function is called from - /// @param[in] size The number of MPI ranks - /// @param[in] filename Name of the file with %d as placeholder for the integer index of the block - /// @param[in] numBlocks Number of blocks to use during computation - /// @param[in] blocksPerRank Number of data blocks to process on each rank - /// @param[out] nDims Number of data dimensions (i.e, 2 or 3) - /// @param[out] useDataSet VTKm partioned dataset to be used with the distributed contour tree filter - /// @param[out] globalSize Global extends of the input mesh (i.e., number of mesh points in each dimension) - /// @param[in] blocksPerDim Number of data blocks used in each data dimension - /// @param[in] localBlockIndices Array with the (x,y,z) index of each local data block with - /// with respect to blocksPerDim - /// @param[in] dataReadTime Time to read the data - /// @param[in] buildDatasetTime Time to construct the VTKm datasets - /// @returns bool indicating whether the read was 
successful or not - template - bool readSingleBlockFile(const int& rank, - const int& size, - const std::string& filename, - const int& numBlocks, - const int& blocksPerRank, - std::vector::size_type& nDims, - vtkm::cont::PartitionedDataSet& useDataSet, - vtkm::Id3& globalSize, - vtkm::Id3& blocksPerDim, - vtkm::cont::ArrayHandle& localBlockIndices, - vtkm::Float64& dataReadTime, - vtkm::Float64& buildDatasetTime) - { - vtkm::cont::Timer totalTime; - totalTime.Start(); - vtkm::Float64 prevTime = 0; - vtkm::Float64 currTime = 0; - - localBlockIndices.Allocate(blocksPerRank); - auto localBlockIndicesPortal = localBlockIndices.WritePortal(); - - vtkm::cont::DataSet inDataSet; - // TODO: Currently FloatDefault would be fine, but it could cause problems if we ever read binary files here. - std::vector values; - std::vector dims; - - // Read BOV data file - if (filename.compare(filename.length() - 3, 3, "bov") == 0) - { - std::cout << "Reading BOV file" << std::endl; - vtkm::io::BOVDataSetReader reader(filename); - inDataSet = reader.ReadDataSet(); - nDims = 3; - currTime = totalTime.GetElapsedTime(); - dataReadTime = currTime - prevTime; - prevTime = currTime; - // Copy the data into the values array so we can construct a multiblock dataset - vtkm::Id3 pointDimensions; - auto cellSet = inDataSet.GetCellSet(); - vtkm::cont::CastAndCall( - cellSet, vtkm::worklet::contourtree_augmented::GetPointDimensions(), pointDimensions); - std::cout << "Point dimensions are " << pointDimensions << std::endl; - dims.resize(3); - dims[0] = pointDimensions[0]; - dims[1] = pointDimensions[1]; - dims[2] = pointDimensions[2]; - auto tempFieldData = inDataSet.GetField(0).GetData(); - values.resize(static_cast(tempFieldData.GetNumberOfValues())); - auto valuesHandle = vtkm::cont::make_ArrayHandle(values, vtkm::CopyFlag::Off); - vtkm::cont::ArrayCopy(tempFieldData, valuesHandle); - valuesHandle.SyncControlArray(); //Forces values to get updated if copy happened on GPU - } - // Read ASCII data 
input - else - { - std::cout << "Reading ASCII file" << std::endl; - std::ifstream inFile(filename); - if (inFile.bad()) - return 0; - - // Read the dimensions of the mesh, i.e,. number of elementes in x, y, and z - std::string line; - getline(inFile, line); - std::istringstream linestream(line); - vtkm::Id dimVertices; - while (linestream >> dimVertices) - { - dims.push_back(dimVertices); - } - // Swap dimensions so that they are from fastest to slowest growing - // dims[0] -> col; dims[1] -> row, dims[2] ->slice - std::swap(dims[0], dims[1]); - - // Compute the number of vertices, i.e., xdim * ydim * zdim - nDims = static_cast(dims.size()); - std::size_t numVertices = static_cast(std::accumulate( - dims.begin(), dims.end(), std::size_t(1), std::multiplies())); - - // Check the the number of dimensiosn is either 2D or 3D - bool invalidNumDimensions = (nDims < 2 || nDims > 3); - // Log any errors if found on rank 0 - VTKM_LOG_IF_S(vtkm::cont::LogLevel::Error, - invalidNumDimensions && (rank == 0), - "The input mesh is " << nDims - << "D. The input data must be either 2D or 3D."); - // If we found any errors in the setttings than finalize MPI and exit the execution - if (invalidNumDimensions) - { - return false; - } - - // Read data - values.resize(numVertices); - for (std::size_t vertex = 0; vertex < numVertices; ++vertex) - { - inFile >> values[vertex]; - } - - // finish reading the data - inFile.close(); - - currTime = totalTime.GetElapsedTime(); - dataReadTime = currTime - prevTime; - prevTime = currTime; - - } // END ASCII Read - - // Create a multi-block dataset for multi-block DIY-paralle processing - blocksPerDim = nDims == 3 ? vtkm::Id3(1, 1, numBlocks) - : vtkm::Id3(1, numBlocks, 1); // Decompose the data into - globalSize = nDims == 3 ? 
vtkm::Id3(static_cast(dims[0]), - static_cast(dims[1]), - static_cast(dims[2])) - : vtkm::Id3(static_cast(dims[0]), - static_cast(dims[1]), - static_cast(1)); - std::cout << blocksPerDim << " " << globalSize << std::endl; - { - vtkm::Id lastDimSize = - (nDims == 2) ? static_cast(dims[1]) : static_cast(dims[2]); - if (size > (lastDimSize / 2.)) - { - VTKM_LOG_IF_S(vtkm::cont::LogLevel::Error, - rank == 0, - "Number of ranks too large for data. Use " << lastDimSize / 2 - << "or fewer ranks"); - return false; - } - vtkm::Id standardBlockSize = (vtkm::Id)(lastDimSize / numBlocks); - vtkm::Id blockSize = standardBlockSize; - vtkm::Id blockSliceSize = nDims == 2 ? static_cast(dims[0]) - : static_cast((dims[0] * dims[1])); - vtkm::Id blockNumValues = blockSize * blockSliceSize; - - vtkm::Id startBlock = blocksPerRank * rank; - vtkm::Id endBlock = startBlock + blocksPerRank; - for (vtkm::Id blockIndex = startBlock; blockIndex < endBlock; ++blockIndex) - { - vtkm::Id localBlockIndex = blockIndex - startBlock; - vtkm::Id blockStart = blockIndex * blockNumValues; - vtkm::Id blockEnd = blockStart + blockNumValues; - if (blockIndex < (numBlocks - 1)) // add overlap between regions - { - blockEnd += blockSliceSize; - } - else - { - blockEnd = lastDimSize * blockSliceSize; - } - vtkm::Id currBlockSize = (vtkm::Id)((blockEnd - blockStart) / blockSliceSize); - - vtkm::cont::DataSetBuilderUniform dsb; - vtkm::cont::DataSet ds; - - // 2D data - if (nDims == 2) - { - vtkm::Id2 vdims; - vdims[0] = static_cast(dims[0]); - vdims[1] = static_cast(currBlockSize); - vtkm::Vec origin(0, blockIndex * blockSize); - vtkm::Vec spacing(1, 1); - ds = dsb.Create(vdims, origin, spacing); - vtkm::cont::CellSetStructured<2> cs; - cs.SetPointDimensions(vdims); - cs.SetGlobalPointDimensions(vtkm::Id2{ globalSize[0], globalSize[1] }); - cs.SetGlobalPointIndexStart(vtkm::Id2{ 0, (blockStart / blockSliceSize) }); - ds.SetCellSet(cs); - localBlockIndicesPortal.Set(localBlockIndex, vtkm::Id3(0, blockIndex, 
0)); - } - // 3D data - else - { - vtkm::Id3 vdims; - vdims[0] = static_cast(dims[0]); - vdims[1] = static_cast(dims[1]); - vdims[2] = static_cast(currBlockSize); - vtkm::Vec origin(0, 0, (blockIndex * blockSize)); - vtkm::Vec spacing(1, 1, 1); - ds = dsb.Create(vdims, origin, spacing); - vtkm::cont::CellSetStructured<3> cs; - cs.SetPointDimensions(vdims); - cs.SetGlobalPointDimensions(globalSize); - cs.SetGlobalPointIndexStart(vtkm::Id3(0, 0, blockStart / blockSliceSize)); - ds.SetCellSet(cs); - localBlockIndicesPortal.Set(localBlockIndex, vtkm::Id3(0, 0, blockIndex)); - } - - std::vector subValues((values.begin() + blockStart), - (values.begin() + blockEnd)); - - ds.AddPointField("values", subValues); - useDataSet.AppendPartition(ds); - } - } - currTime = totalTime.GetElapsedTime(); - buildDatasetTime = currTime - prevTime; - return true; - } + ds.AddPointField("values", subValues); + useDataSet.AppendPartition(ds); + } + } + currTime = totalTime.GetElapsedTime(); + buildDatasetTime = currTime - prevTime; + return true; +} #endif From 7766bbc3cd18b101e87525d43050f7da357485e1 Mon Sep 17 00:00:00 2001 From: Ben Boeckel Date: Wed, 21 Dec 2022 09:34:18 -0500 Subject: [PATCH 14/29] gitlab-ci: use arch-specific tags for OS selection Also add some missing OS tags for HIP jobs. 
--- .gitlab/ci/centos7.yml | 6 +++--- .gitlab/ci/centos8.yml | 4 ++-- .gitlab/ci/doxygen.yml | 2 +- .gitlab/ci/rhel8.yml | 8 ++++---- .gitlab/ci/ubuntu1604.yml | 12 ++++++------ .gitlab/ci/ubuntu1804.yml | 24 ++++++++++++------------ .gitlab/ci/ubuntu2004.yml | 10 ++++++---- .gitlab/ci/windows10.yml | 4 ++-- 8 files changed, 36 insertions(+), 34 deletions(-) diff --git a/.gitlab/ci/centos7.yml b/.gitlab/ci/centos7.yml index c30be42bd..018b83fc1 100644 --- a/.gitlab/ci/centos7.yml +++ b/.gitlab/ci/centos7.yml @@ -6,7 +6,7 @@ build:centos7_gcc73: - build - vtkm - docker - - linux + - linux-x86_64 - cuda-rt - large-memory extends: @@ -24,7 +24,7 @@ test:centos7_gcc73: - test - vtkm - docker - - linux + - linux-x86_64 - cuda-rt - turing extends: @@ -41,7 +41,7 @@ test:rhel8_test_centos7: - test - vtkm - docker - - linux + - linux-x86_64 - cuda-rt - turing extends: diff --git a/.gitlab/ci/centos8.yml b/.gitlab/ci/centos8.yml index 03ca9a583..ab1592dc0 100644 --- a/.gitlab/ci/centos8.yml +++ b/.gitlab/ci/centos8.yml @@ -6,7 +6,7 @@ build:centos8_sanitizer: - build - vtkm - docker - - linux + - linux-x86_64 extends: - .centos8 - .cmake_build_linux @@ -21,7 +21,7 @@ test:centos8_sanitizer: - test - vtkm - docker - - linux + - linux-x86_64 - privileged extends: - .centos8 diff --git a/.gitlab/ci/doxygen.yml b/.gitlab/ci/doxygen.yml index e04f4cb6b..da5bf167d 100644 --- a/.gitlab/ci/doxygen.yml +++ b/.gitlab/ci/doxygen.yml @@ -18,7 +18,7 @@ doxygen: tags: - vtkm - docker - - linux + - linux-x86_64 extends: - .ubuntu2004_doxygen before_script: diff --git a/.gitlab/ci/rhel8.yml b/.gitlab/ci/rhel8.yml index c17398b9c..5908aa322 100644 --- a/.gitlab/ci/rhel8.yml +++ b/.gitlab/ci/rhel8.yml @@ -6,7 +6,7 @@ build:rhel8: - build - vtkm - docker - - linux + - linux-x86_64 extends: - .rhel8 - .cmake_build_linux @@ -20,7 +20,7 @@ test:rhel8: - test - vtkm - docker - - linux + - linux-x86_64 extends: - .rhel8 - .cmake_test_linux @@ -37,7 +37,7 @@ build:rhel8_vtk_types: - build - vtkm - 
docker - - linux + - linux-x86_64 extends: - .rhel8 - .cmake_build_linux @@ -51,7 +51,7 @@ test:rhel8_vtk_types: - test - vtkm - docker - - linux + - linux-x86_64 extends: - .rhel8 - .cmake_test_linux diff --git a/.gitlab/ci/ubuntu1604.yml b/.gitlab/ci/ubuntu1604.yml index fce61e4ac..e6eac8a1d 100644 --- a/.gitlab/ci/ubuntu1604.yml +++ b/.gitlab/ci/ubuntu1604.yml @@ -6,7 +6,7 @@ build:ubuntu1604_gcc5: - build - vtkm - docker - - linux + - linux-x86_64 - cuda-rt - large-memory extends: @@ -25,7 +25,7 @@ test:ubuntu1604_gcc5: - test - vtkm - docker - - linux + - linux-x86_64 - cuda-rt - pascal extends: @@ -44,7 +44,7 @@ build:ubuntu1604_gcc5_2: - build - vtkm - docker - - linux + - linux-x86_64 - cuda-rt - large-memory extends: @@ -63,7 +63,7 @@ test:ubuntu1804_test_ubuntu1604_gcc5_2: - test - vtkm - docker - - linux + - linux-x86_64 - cuda-rt - pascal extends: @@ -84,7 +84,7 @@ build:ubuntu1604_clang5: - build - vtkm - docker - - linux + - linux-x86_64 extends: - .ubuntu1604 - .cmake_build_linux @@ -101,7 +101,7 @@ test:ubuntu1604_clang5: - test - vtkm - docker - - linux + - linux-x86_64 extends: - .ubuntu1604 - .cmake_test_linux diff --git a/.gitlab/ci/ubuntu1804.yml b/.gitlab/ci/ubuntu1804.yml index 6b9121f1d..e7bde2c33 100644 --- a/.gitlab/ci/ubuntu1804.yml +++ b/.gitlab/ci/ubuntu1804.yml @@ -7,7 +7,7 @@ build:ubuntu1804_gcc9: - build - vtkm - docker - - linux + - linux-x86_64 extends: - .ubuntu1804 - .cmake_build_linux @@ -23,7 +23,7 @@ test:ubuntu1804_gcc9: - test - vtkm - docker - - linux + - linux-x86_64 extends: - .ubuntu1804 - .cmake_test_linux @@ -45,7 +45,7 @@ build:ubuntu1804_gcc7: - build - vtkm - docker - - linux + - linux-x86_64 - cuda-rt - large-memory extends: @@ -63,7 +63,7 @@ test:ubuntu1804_gcc7: - test - vtkm - docker - - linux + - linux-x86_64 - cuda-rt - turing extends: @@ -84,7 +84,7 @@ build:ubuntu1804_clang_cuda: - build - vtkm - docker - - linux + - linux-x86_64 - cuda-rt - large-memory extends: @@ -103,7 +103,7 @@ 
test:ubuntu1804_clang_cuda: - test - vtkm - docker - - linux + - linux-x86_64 - cuda-rt - pascal extends: @@ -123,7 +123,7 @@ build:ubuntu1804_gcc6: - build - vtkm - docker - - linux + - linux-x86_64 extends: - .ubuntu1804 - .cmake_build_linux @@ -138,7 +138,7 @@ test:ubuntu1804_gcc6: - test - vtkm - docker - - linux + - linux-x86_64 extends: - .ubuntu1804 - .cmake_test_linux @@ -159,7 +159,7 @@ build:ubuntu1804_clang8: - build - vtkm - docker - - linux + - linux-x86_64 extends: - .ubuntu1804 - .cmake_build_linux @@ -175,7 +175,7 @@ test:ubuntu1804_clang8: - test - vtkm - docker - - linux + - linux-x86_64 extends: - .ubuntu1804 - .cmake_test_linux @@ -192,7 +192,7 @@ build:ubuntu1804_kokkos: - build - vtkm - docker - - linux + - linux-x86_64 - cuda-rt - large-memory extends: @@ -209,7 +209,7 @@ test:ubuntu1804_kokkos: - test - vtkm - docker - - linux + - linux-x86_64 - cuda-rt - turing extends: diff --git a/.gitlab/ci/ubuntu2004.yml b/.gitlab/ci/ubuntu2004.yml index ad7aded76..2e0113fe3 100644 --- a/.gitlab/ci/ubuntu2004.yml +++ b/.gitlab/ci/ubuntu2004.yml @@ -3,7 +3,7 @@ build:ubuntu2004_gcc9: - build - vtkm - docker - - linux + - linux-x86_64 extends: - .ubuntu2004 - .cmake_build_linux @@ -17,7 +17,7 @@ test:ubuntu2004_gcc9: - test - vtkm - docker - - linux + - linux-x86_64 extends: - .ubuntu2004 - .cmake_test_linux @@ -36,7 +36,7 @@ build:ubuntu2004_kokkos: - build - vtkm - docker - - linux + - linux-x86_64 extends: - .ubuntu2004_kokkos - .cmake_build_linux @@ -50,7 +50,7 @@ test:ubuntu2004_kokkos: - test - vtkm - docker - - linux + - linux-x86_64 extends: - .ubuntu2004_kokkos - .cmake_test_linux @@ -65,6 +65,7 @@ build:ubuntu2004_hip_kokkos: - build - vtkm - docker + - linux-x86_64 - radeon extends: - .ubuntu2004_hip_kokkos @@ -82,6 +83,7 @@ test:ubuntu2004_hip_kokkos: - build - vtkm - docker + - linux-x86_64 - radeon extends: - .ubuntu2004_hip_kokkos diff --git a/.gitlab/ci/windows10.yml b/.gitlab/ci/windows10.yml index 9955d6a54..5da0da98d 100644 --- 
a/.gitlab/ci/windows10.yml +++ b/.gitlab/ci/windows10.yml @@ -86,7 +86,7 @@ build:windows_vs2019: - vtkm # Since this is a bare runner, pin to a project. - nonconcurrent - build - - windows + - windows-x86_64 - shell - vs2019 - msvc-19.25 @@ -106,7 +106,7 @@ test:windows_vs2019: - vtkm # Since this is a bare runner, pin to a project. - nonconcurrent - test - - windows + - windows-x86_64 - shell - vs2019 - msvc-19.25 From c3f4c924d471e09ff7b6877308e9f26b73ff6dd3 Mon Sep 17 00:00:00 2001 From: Ben Boeckel Date: Wed, 21 Dec 2022 09:34:18 -0500 Subject: [PATCH 15/29] gitlab-ci: add missing feature tag for doxygen submission --- .gitlab/ci/doxygen.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitlab/ci/doxygen.yml b/.gitlab/ci/doxygen.yml index da5bf167d..2741ebf73 100644 --- a/.gitlab/ci/doxygen.yml +++ b/.gitlab/ci/doxygen.yml @@ -16,6 +16,7 @@ doxygen: timeout: 30 minutes interruptible: true tags: + - build - vtkm - docker - linux-x86_64 From 054661f68270313cf04fc9dc7eb85e4c85825a20 Mon Sep 17 00:00:00 2001 From: Ben Boeckel Date: Wed, 21 Dec 2022 09:35:02 -0500 Subject: [PATCH 16/29] gitlab-ci: use arch-specific tags for OS selection --- .gitlab/ci/macos.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitlab/ci/macos.yml b/.gitlab/ci/macos.yml index 6badbab88..7f96585f9 100644 --- a/.gitlab/ci/macos.yml +++ b/.gitlab/ci/macos.yml @@ -78,6 +78,6 @@ test:macos_xcode13: .macos_build_tags: tags: - vtk-m - - macos + - macos-x86_64 - xcode-13.3 - nonconcurrent From c8cc834b90ef90a3505766b33bf0a347af9b1fc7 Mon Sep 17 00:00:00 2001 From: Ben Boeckel Date: Wed, 21 Dec 2022 09:35:53 -0500 Subject: [PATCH 17/29] gitlab-ci: add missing platform and feature tags to ascent job --- .gitlab/ci/ubuntu2004.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.gitlab/ci/ubuntu2004.yml b/.gitlab/ci/ubuntu2004.yml index 108cdb8be..16d3bc189 100644 --- a/.gitlab/ci/ubuntu2004.yml +++ b/.gitlab/ci/ubuntu2004.yml @@ -120,6 +120,8 @@ build:ascent: tags: - 
vtkm - docker + - build + - linux-x86_64 extends: - .ubuntu2004 - .run_only_merge_requests From d5ce967890f32c9cf47b3d88e1854c300dd9af91 Mon Sep 17 00:00:00 2001 From: Vicente Adolfo Bolea Sanchez Date: Mon, 26 Dec 2022 13:40:12 -0500 Subject: [PATCH 18/29] CMAKE: fix vtkm devices namespaces --- CMake/VTKmCompilerFlags.cmake | 2 +- CMake/VTKmDeviceAdapters.cmake | 32 ++++++++++++++---------- CMake/VTKmWrappers.cmake | 4 +-- CMake/testing/VTKmTestInstall.cmake | 4 +-- vtkm/cont/CMakeLists.txt | 4 +-- vtkm/cont/kokkos/internal/CMakeLists.txt | 6 ++--- vtkm/cont/kokkos/testing/CMakeLists.txt | 4 +-- vtkm/cont/kokkos/vtkm.module | 2 +- vtkm/internal/CMakeLists.txt | 4 +-- 9 files changed, 34 insertions(+), 28 deletions(-) diff --git a/CMake/VTKmCompilerFlags.cmake b/CMake/VTKmCompilerFlags.cmake index d37ba3a69..2a7f4841f 100644 --- a/CMake/VTKmCompilerFlags.cmake +++ b/CMake/VTKmCompilerFlags.cmake @@ -186,7 +186,7 @@ function(setup_cuda_flags) endfunction() #common warnings for all platforms when building cuda -if ((TARGET vtkm_cuda) OR (TARGET vtkm::kokkos_cuda)) +if ((TARGET vtkm_cuda) OR (TARGET vtkm_kokkos_cuda)) setup_cuda_flags() endif() diff --git a/CMake/VTKmDeviceAdapters.cmake b/CMake/VTKmDeviceAdapters.cmake index e87f05357..2a056a950 100644 --- a/CMake/VTKmDeviceAdapters.cmake +++ b/CMake/VTKmDeviceAdapters.cmake @@ -41,7 +41,7 @@ function(vtkm_extract_real_library library real_library) endif() endfunction() -if(VTKm_ENABLE_TBB AND NOT TARGET vtkm_tbb) +if(VTKm_ENABLE_TBB AND NOT (TARGET vtkm_tbb OR TARGET vtkm::tbb)) # Skip find_package(TBB) if we already have it if (NOT TARGET TBB::tbb) find_package(TBB REQUIRED) @@ -54,7 +54,7 @@ if(VTKm_ENABLE_TBB AND NOT TARGET vtkm_tbb) install(TARGETS vtkm_tbb EXPORT ${VTKm_EXPORT_NAME}) endif() -if(VTKm_ENABLE_OPENMP AND NOT TARGET vtkm_openmp) +if(VTKm_ENABLE_OPENMP AND NOT (TARGET vtkm_openmp OR TARGET vtkm::openmp)) find_package(OpenMP 4.0 REQUIRED COMPONENTS CXX QUIET) add_library(vtkm_openmp INTERFACE) @@ 
-85,7 +85,7 @@ if(VTKm_ENABLE_CUDA) message(FATAL_ERROR "VTK-m CUDA support requires version 9.2+") endif() - if(NOT TARGET vtkm_cuda) + if(NOT (TARGET vtkm_cuda OR TARGET vtkm::cuda)) add_library(vtkm_cuda INTERFACE) set_target_properties(vtkm_cuda PROPERTIES EXPORT_NAME cuda) @@ -313,13 +313,13 @@ function(kokkos_fix_compile_options) endforeach() endwhile() - set_property(TARGET vtkm::kokkos PROPERTY INTERFACE_LINK_OPTIONS "$") + set_property(TARGET vtkm_kokkos PROPERTY INTERFACE_LINK_OPTIONS "$") if (OPENMP IN_LIST Kokkos_DEVICES) - set_property(TARGET vtkm::kokkos PROPERTY INTERFACE_LINK_OPTIONS "$") + set_property(TARGET vtkm_kokkos PROPERTY INTERFACE_LINK_OPTIONS "$") endif() endfunction() -if(VTKm_ENABLE_KOKKOS AND NOT TARGET vtkm::kokkos) +if(VTKm_ENABLE_KOKKOS AND NOT TARGET vtkm_kokkos) cmake_minimum_required(VERSION 3.13 FATAL_ERROR) find_package(Kokkos REQUIRED) @@ -344,19 +344,25 @@ if(VTKm_ENABLE_KOKKOS AND NOT TARGET vtkm::kokkos) set(CMAKE_CUDA_ARCHITECTURES ${cuda_arch}) message(STATUS "Detected Cuda arch from Kokkos: ${cuda_arch}") - add_library(vtkm::kokkos_cuda INTERFACE IMPORTED GLOBAL) + add_library(vtkm_kokkos_cuda INTERFACE) + set_property(TARGET vtkm_kokkos_cuda PROPERTY EXPORT_NAME kokkos_cuda) + install(TARGETS vtkm_kokkos_cuda EXPORT ${VTKm_EXPORT_NAME}) elseif(HIP IN_LIST Kokkos_DEVICES) cmake_minimum_required(VERSION 3.18 FATAL_ERROR) enable_language(HIP) - add_library(vtkm::kokkos_hip INTERFACE IMPORTED GLOBAL) - set_property(TARGET Kokkos::kokkoscore PROPERTY INTERFACE_COMPILE_OPTIONS "") - set_property(TARGET Kokkos::kokkoscore PROPERTY INTERFACE_LINK_OPTIONS "") + set_target_properties(Kokkos::kokkoscore PROPERTIES + INTERFACE_COMPILE_OPTIONS "" + INTERFACE_LINK_OPTIONS "" + ) + add_library(vtkm_kokkos_hip INTERFACE) + set_property(TARGET vtkm_kokkos_hip PROPERTY EXPORT_NAME kokkos_hip) + install(TARGETS vtkm_kokkos_hip EXPORT ${VTKm_EXPORT_NAME}) endif() - add_library(vtkm::kokkos INTERFACE IMPORTED GLOBAL) - 
set_target_properties(vtkm::kokkos PROPERTIES INTERFACE_LINK_LIBRARIES "Kokkos::kokkos") + add_library(vtkm_kokkos INTERFACE IMPORTED GLOBAL) + set_target_properties(vtkm_kokkos PROPERTIES INTERFACE_LINK_LIBRARIES "Kokkos::kokkos") - if (TARGET vtkm::kokkos_cuda) + if (TARGET vtkm_kokkos_cuda) kokkos_fix_compile_options() endif() endif() diff --git a/CMake/VTKmWrappers.cmake b/CMake/VTKmWrappers.cmake index 3284a6279..10ba21e06 100644 --- a/CMake/VTKmWrappers.cmake +++ b/CMake/VTKmWrappers.cmake @@ -434,9 +434,9 @@ function(vtkm_add_target_information uses_vtkm_target) endforeach() endif() - if((TARGET vtkm_cuda) OR (TARGET vtkm::kokkos_cuda)) + if(TARGET vtkm_cuda OR TARGET vtkm::cuda OR TARGET vtkm_kokkos_cuda OR TARGET vtkm::kokkos_cuda) set_source_files_properties(${VTKm_TI_DEVICE_SOURCES} PROPERTIES LANGUAGE "CUDA") - elseif(TARGET vtkm::kokkos_hip) + elseif(TARGET vtkm_kokkos_hip OR TARGET vtkm::kokkos_hip) set_source_files_properties(${VTKm_TI_DEVICE_SOURCES} PROPERTIES LANGUAGE "HIP") kokkos_compilation(SOURCE ${VTKm_TI_DEVICE_SOURCES}) endif() diff --git a/CMake/testing/VTKmTestInstall.cmake b/CMake/testing/VTKmTestInstall.cmake index 839c80b82..53ab591d9 100644 --- a/CMake/testing/VTKmTestInstall.cmake +++ b/CMake/testing/VTKmTestInstall.cmake @@ -116,7 +116,7 @@ function(vtkm_test_against_install dir) set(args -C ${build_config}) endif() - if(WIN32 AND TARGET vtkm::tbb) + if(WIN32 AND TARGET vtkm_tbb) #on windows we need to specify these as FindTBB won't #find the installed version just with the prefix path list(APPEND args @@ -126,7 +126,7 @@ function(vtkm_test_against_install dir) ) endif() - if(TARGET vtkm::kokkos) + if(TARGET vtkm_kokkos) list(APPEND args "-DKokkos_DIR=${Kokkos_DIR}") endif() diff --git a/vtkm/cont/CMakeLists.txt b/vtkm/cont/CMakeLists.txt index 17273aee9..35f356527 100644 --- a/vtkm/cont/CMakeLists.txt +++ b/vtkm/cont/CMakeLists.txt @@ -280,8 +280,8 @@ endif() if(TARGET vtkm_openmp) list(APPEND backends vtkm_openmp) endif() 
-if(TARGET vtkm::kokkos) - list(APPEND backends vtkm::kokkos) +if(TARGET vtkm_kokkos) + list(APPEND backends vtkm_kokkos) endif() target_link_libraries(vtkm_cont PUBLIC vtkm_compiler_flags ${backends}) diff --git a/vtkm/cont/kokkos/internal/CMakeLists.txt b/vtkm/cont/kokkos/internal/CMakeLists.txt index c00a8851d..9f924b0f4 100644 --- a/vtkm/cont/kokkos/internal/CMakeLists.txt +++ b/vtkm/cont/kokkos/internal/CMakeLists.txt @@ -20,7 +20,7 @@ set(headers vtkm_declare_headers(${headers}) -if (TARGET vtkm::kokkos) +if (TARGET vtkm_kokkos) set(sources ${CMAKE_CURRENT_SOURCE_DIR}/DeviceAdapterAlgorithmKokkos.cxx ${CMAKE_CURRENT_SOURCE_DIR}/DeviceAdapterMemoryManagerKokkos.cxx @@ -29,9 +29,9 @@ if (TARGET vtkm::kokkos) ${CMAKE_CURRENT_SOURCE_DIR}/KokkosTypes.cxx) target_sources(vtkm_cont PRIVATE ${sources}) - if (TARGET vtkm::kokkos_cuda) + if (TARGET vtkm_kokkos_cuda) set_source_files_properties(${sources} TARGET_DIRECTORY vtkm_cont PROPERTIES LANGUAGE CUDA) - elseif(TARGET vtkm::kokkos_hip) + elseif(TARGET vtkm_kokkos_hip) set_source_files_properties(${sources} TARGET_DIRECTORY vtkm_cont PROPERTIES LANGUAGE HIP) kokkos_compilation(SOURCE ${sources}) endif() diff --git a/vtkm/cont/kokkos/testing/CMakeLists.txt b/vtkm/cont/kokkos/testing/CMakeLists.txt index 2afc9846b..ffdfd6eb9 100644 --- a/vtkm/cont/kokkos/testing/CMakeLists.txt +++ b/vtkm/cont/kokkos/testing/CMakeLists.txt @@ -15,9 +15,9 @@ set(unit_tests vtkm_unit_tests(SOURCES ${unit_tests} LABEL "KOKKOS" LIBRARIES vtkm_worklet BACKEND kokkos) -if (TARGET vtkm::kokkos_cuda) +if (TARGET vtkm_kokkos_cuda) set_source_files_properties(${unit_tests} PROPERTIES LANGUAGE CUDA) -elseif(TARGET vtkm::kokkos_hip) +elseif(TARGET vtkm_kokkos_hip) set_source_files_properties(${unit_tests} PROPERTIES LANGUAGE HIP) kokkos_compilation(SOURCE ${unit_tests}) endif() diff --git a/vtkm/cont/kokkos/vtkm.module b/vtkm/cont/kokkos/vtkm.module index 195c97d17..1c41028fd 100644 --- a/vtkm/cont/kokkos/vtkm.module +++ 
b/vtkm/cont/kokkos/vtkm.module @@ -6,4 +6,4 @@ DEPENDS vtkm_cont TEST_DEPENDS vtkm_worklet - vtkm::kokkos + vtkm_kokkos diff --git a/vtkm/internal/CMakeLists.txt b/vtkm/internal/CMakeLists.txt index 62e860314..88f7622f8 100755 --- a/vtkm/internal/CMakeLists.txt +++ b/vtkm/internal/CMakeLists.txt @@ -31,9 +31,9 @@ if(VTKM_ENABLE_CUDA) string(REGEX REPLACE "([0-9]+)\\.([0-9]+).*" "\\2" VTKM_CUDA_VERSION_MINOR ${CMAKE_CUDA_COMPILER_VERSION}) endif() -if (TARGET vtkm::kokkos_cuda) +if (TARGET vtkm_kokkos_cuda) set(VTKM_KOKKOS_CUDA ON) -elseif(TARGET vtkm::kokkos_hip) +elseif(TARGET vtkm_kokkos_hip) set(VTKM_KOKKOS_HIP ON) endif() From a3d7f9475b4c213417a00469e934e2b5c931ffc3 Mon Sep 17 00:00:00 2001 From: Kenneth Moreland Date: Tue, 3 Jan 2023 13:56:29 -0700 Subject: [PATCH 19/29] Force functions passed as templates to be functors There are some special functions/methods that take as an argument a function-like object and then call that function with some arguments. There are some instances where a templated function was passed given the appropriate template. Even though there is a specific function, this gets passed as a function pointer and calling a function pointer on some devices is a no-no. Replace these function arguments with lambdas, which are constructed as unnamed functor objects. --- vtkm/cont/ArrayHandleCompositeVector.h | 25 +++++++++++-------------- 1 file changed, 11 insertions(+), 14 deletions(-) diff --git a/vtkm/cont/ArrayHandleCompositeVector.h b/vtkm/cont/ArrayHandleCompositeVector.h index 57ba4d2ba..02bbf59e7 100644 --- a/vtkm/cont/ArrayHandleCompositeVector.h +++ b/vtkm/cont/ArrayHandleCompositeVector.h @@ -66,18 +66,6 @@ struct GetValueType using ValueType = typename ArrayType::ValueType; }; -// GetFromPortals: ------------------------------------------------------------- -// Given a set of array portals as arguments, returns a Vec comprising the values -// at the provided index. 
-VTKM_SUPPRESS_EXEC_WARNINGS -template -VTKM_EXEC_CONT typename GetValueType::ValueType GetFromPortals( - vtkm::Id index, - const Portals&... portals) -{ - return { portals.Get(index)... }; -} - // SetToPortals: --------------------------------------------------------------- // Given a Vec-like object, and index, and a set of array portals, sets each of // the portals to the respective component of the Vec. @@ -137,14 +125,23 @@ public: VTKM_EXEC_CONT ValueType Get(vtkm::Id index) const { - return this->Portals.Apply(compvec::GetFromPortals, index); + auto getFromPortals = [index](const auto&... portals) { + return ValueType{ portals.Get(index)... }; + }; + return this->Portals.Apply(getFromPortals); } template ::type> VTKM_EXEC_CONT void Set(vtkm::Id index, const ValueType& value) const { - this->Portals.Apply(compvec::SetToPortals, index, value); + // Note that we are using a lambda function here to implicitly construct a + // functor to pass to Apply. Some device compilers will not allow passing a + // function or function pointer to Tuple::Apply. + auto setToPortal = [index, &value](const auto&... 
portals) { + compvec::SetToPortals(index, value, portals...); + }; + this->Portals.Apply(setToPortal); } }; From 51ca95bf6eb4215bf3dd0712a6cbcd79d51e8858 Mon Sep 17 00:00:00 2001 From: Abhishek Yenpure Date: Tue, 3 Jan 2023 08:47:08 -0800 Subject: [PATCH 20/29] Add VTKm_USE_DEFAULT_TYPES_FOR_ASCENT to VTKmConfig.cmake --- CMake/VTKmConfig.cmake.in | 1 + 1 file changed, 1 insertion(+) diff --git a/CMake/VTKmConfig.cmake.in b/CMake/VTKmConfig.cmake.in index 8c29b3eb6..c7d945191 100644 --- a/CMake/VTKmConfig.cmake.in +++ b/CMake/VTKmConfig.cmake.in @@ -77,6 +77,7 @@ set(VTKm_ENABLE_OSMESA_CONTEXT "@VTKm_ENABLE_OSMESA_CONTEXT@") set(VTKm_ENABLE_EGL_CONTEXT "@VTKm_ENABLE_EGL_CONTEXT@") set(VTKm_ENABLE_MPI "@VTKm_ENABLE_MPI@") set(VTKm_ENABLE_TESTING_LIBRARY "@VTKm_ENABLE_TESTING_LIBRARY@") +set(VTKm_USE_DEFAULT_TYPES_FOR_ASCENT "@VTKm_USE_DEFAULT_TYPES_FOR_ASCENT@") # This is true when the package is still in the build directory (not installed) if(CMAKE_CURRENT_LIST_DIR STREQUAL "@VTKm_BUILD_CMAKE_BASE_DIR@/@VTKm_INSTALL_CONFIG_DIR@") From 95c641559ded8afd94528de05543a82af28bd117 Mon Sep 17 00:00:00 2001 From: Mark Bolstad Date: Mon, 9 Jan 2023 08:42:23 -0700 Subject: [PATCH 21/29] Add cmake flag to override default ctest timeouts The current ctest timeout in VTK-m are fine for the CI, but are too long for develoment when a test is hanging on a queued node. This commit allows a developer to turn off the majority of the hard coded timeout values which allows them to set them at the ctest command-line. --- CMake/testing/VTKmTestWrappers.cmake | 52 +++++++++++++++++++--------- CMakeLists.txt | 6 ++++ 2 files changed, 42 insertions(+), 16 deletions(-) diff --git a/CMake/testing/VTKmTestWrappers.cmake b/CMake/testing/VTKmTestWrappers.cmake index 508bbd300..df20d1deb 100644 --- a/CMake/testing/VTKmTestWrappers.cmake +++ b/CMake/testing/VTKmTestWrappers.cmake @@ -298,34 +298,54 @@ vtkm_unit_tests but not in its test dependencies. 
Add test dependencies to \ $ ${tname} ${device_command_line_argument} ${vtkm_default_test_log_level} ${VTKm_UT_TEST_ARGS} ${MPIEXEC_POSTFLAGS} ) - set_tests_properties("${tname}${upper_backend}_mpi" PROPERTIES - LABELS "${upper_backend};${VTKm_UT_LABEL}" - TIMEOUT ${timeout} - RUN_SERIAL ${run_serial} - FAIL_REGULAR_EXPRESSION "runtime error") + if (VTKm_OVERRIDE_CTEST_TIMEOUT) + set_tests_properties("${tname}${upper_backend}_mpi" PROPERTIES + LABELS "${upper_backend};${VTKm_UT_LABEL}" + RUN_SERIAL ${run_serial} + FAIL_REGULAR_EXPRESSION "runtime error") + else() + set_tests_properties("${tname}${upper_backend}_mpi" PROPERTIES + LABELS "${upper_backend};${VTKm_UT_LABEL}" + TIMEOUT ${timeout} + RUN_SERIAL ${run_serial} + FAIL_REGULAR_EXPRESSION "runtime error") + endif() # VTKm_OVERRIDE_CTEST_TIMEOUT endif() # VTKm_ENABLE_MPI if ((NOT VTKm_ENABLE_MPI) OR VTKm_ENABLE_DIY_NOMPI) add_test(NAME ${tname}${upper_backend}_nompi COMMAND ${test_prog}_nompi ${tname} ${device_command_line_argument} ${vtkm_default_test_log_level} ${VTKm_UT_TEST_ARGS} ) - set_tests_properties("${tname}${upper_backend}_nompi" PROPERTIES - LABELS "${upper_backend};${VTKm_UT_LABEL}" - TIMEOUT ${timeout} - RUN_SERIAL ${run_serial} - FAIL_REGULAR_EXPRESSION "runtime error") - + if (VTKm_OVERRIDE_CTEST_TIMEOUT) + set_tests_properties("${tname}${upper_backend}_nompi" PROPERTIES + LABELS "${upper_backend};${VTKm_UT_LABEL}" + RUN_SERIAL ${run_serial} + FAIL_REGULAR_EXPRESSION "runtime error") + else() + set_tests_properties("${tname}${upper_backend}_nompi" PROPERTIES + LABELS "${upper_backend};${VTKm_UT_LABEL}" + TIMEOUT ${timeout} + RUN_SERIAL ${run_serial} + FAIL_REGULAR_EXPRESSION "runtime error") + endif() #VTKm_OVERRIDE_CTEST_TIMEOUT endif() # VTKm_ENABLE_DIY_NOMPI else() # VTKm_UT_MPI add_test(NAME ${tname}${upper_backend} COMMAND ${test_prog} ${tname} ${device_command_line_argument} ${vtkm_default_test_log_level} ${VTKm_UT_TEST_ARGS} ) - set_tests_properties("${tname}${upper_backend}" PROPERTIES - 
LABELS "${upper_backend};${VTKm_UT_LABEL}" - TIMEOUT ${timeout} - RUN_SERIAL ${run_serial} - FAIL_REGULAR_EXPRESSION "runtime error") + if (VTKm_OVERRIDE_CTEST_TIMEOUT) + set_tests_properties("${tname}${upper_backend}" PROPERTIES + LABELS "${upper_backend};${VTKm_UT_LABEL}" + RUN_SERIAL ${run_serial} + FAIL_REGULAR_EXPRESSION "runtime error") + else() + set_tests_properties("${tname}${upper_backend}" PROPERTIES + LABELS "${upper_backend};${VTKm_UT_LABEL}" + TIMEOUT ${timeout} + RUN_SERIAL ${run_serial} + FAIL_REGULAR_EXPRESSION "runtime error") + endif() #VTKm_OVERRIDE_CTEST_TIMEOUT endif() # VTKm_UT_MPI endforeach() endforeach() diff --git a/CMakeLists.txt b/CMakeLists.txt index 31572a24b..725199cc4 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -181,6 +181,11 @@ vtkm_option(VTKm_NO_INSTALL_README_LICENSE "disable the installation of README a # Allow VTK to turn off these symlinks for its wheel distribution. vtkm_option(VTKm_SKIP_LIBRARY_VERSIONS "Skip versioning VTK-m libraries" OFF) +# During development, running unit tests with the default values can be too lengthy. +# Allow for the developer to skip the majority of the default values and control them +# through ctest's command-line. Doesn't affect CI unless enabled. 
+vtkm_option(VTKm_OVERRIDE_CTEST_TIMEOUT "Disable default ctest timeout" OFF) + mark_as_advanced( VTKm_ENABLE_LOGGING VTKm_NO_ASSERT @@ -191,6 +196,7 @@ mark_as_advanced( VTKm_ENABLE_DEVELOPER_FLAGS VTKm_NO_INSTALL_README_LICENSE VTKm_SKIP_LIBRARY_VERSIONS + VTKm_OVERRIDE_CTEST_TIMEOUT ) #----------------------------------------------------------------------------- From c5ce6cb96b945d94492770312da33f60004d23bc Mon Sep 17 00:00:00 2001 From: Mark Bolstad Date: Mon, 9 Jan 2023 10:12:27 -0700 Subject: [PATCH 22/29] Replace if/then with string parameter --- CMake/testing/VTKmTestWrappers.cmake | 58 +++++++++++----------------- 1 file changed, 22 insertions(+), 36 deletions(-) diff --git a/CMake/testing/VTKmTestWrappers.cmake b/CMake/testing/VTKmTestWrappers.cmake index df20d1deb..0763082c5 100644 --- a/CMake/testing/VTKmTestWrappers.cmake +++ b/CMake/testing/VTKmTestWrappers.cmake @@ -289,6 +289,12 @@ vtkm_unit_tests but not in its test dependencies. Add test dependencies to \ list(GET per_device_timeout ${index} timeout) list(GET per_device_serial ${index} run_serial) + # If set, remove the VTK-m specified timeouts for CTest + set(EXTRA_ARGS) + if (NOT VTKm_OVERRIDE_CTEST_TIMEOUT) + list(APPEND EXTRA_ARGS TIMEOUT ${timeout}) + endif() + foreach (test ${VTKm_UT_SOURCES} ${VTKm_UT_DEVICE_SOURCES}) get_filename_component(tname ${test} NAME_WE) if(VTKm_UT_MPI) @@ -298,54 +304,34 @@ vtkm_unit_tests but not in its test dependencies. 
Add test dependencies to \ $ ${tname} ${device_command_line_argument} ${vtkm_default_test_log_level} ${VTKm_UT_TEST_ARGS} ${MPIEXEC_POSTFLAGS} ) - if (VTKm_OVERRIDE_CTEST_TIMEOUT) - set_tests_properties("${tname}${upper_backend}_mpi" PROPERTIES - LABELS "${upper_backend};${VTKm_UT_LABEL}" - RUN_SERIAL ${run_serial} - FAIL_REGULAR_EXPRESSION "runtime error") - else() - set_tests_properties("${tname}${upper_backend}_mpi" PROPERTIES - LABELS "${upper_backend};${VTKm_UT_LABEL}" - TIMEOUT ${timeout} - RUN_SERIAL ${run_serial} - FAIL_REGULAR_EXPRESSION "runtime error") - endif() # VTKm_OVERRIDE_CTEST_TIMEOUT + set_tests_properties("${tname}${upper_backend}_mpi" PROPERTIES + LABELS "${upper_backend};${VTKm_UT_LABEL}" + ${EXTRA_ARGS} + RUN_SERIAL ${run_serial} + FAIL_REGULAR_EXPRESSION "runtime error") endif() # VTKm_ENABLE_MPI if ((NOT VTKm_ENABLE_MPI) OR VTKm_ENABLE_DIY_NOMPI) add_test(NAME ${tname}${upper_backend}_nompi COMMAND ${test_prog}_nompi ${tname} ${device_command_line_argument} ${vtkm_default_test_log_level} ${VTKm_UT_TEST_ARGS} ) - if (VTKm_OVERRIDE_CTEST_TIMEOUT) - set_tests_properties("${tname}${upper_backend}_nompi" PROPERTIES - LABELS "${upper_backend};${VTKm_UT_LABEL}" - RUN_SERIAL ${run_serial} - FAIL_REGULAR_EXPRESSION "runtime error") - else() - set_tests_properties("${tname}${upper_backend}_nompi" PROPERTIES - LABELS "${upper_backend};${VTKm_UT_LABEL}" - TIMEOUT ${timeout} - RUN_SERIAL ${run_serial} - FAIL_REGULAR_EXPRESSION "runtime error") - endif() #VTKm_OVERRIDE_CTEST_TIMEOUT + set_tests_properties("${tname}${upper_backend}_nompi" PROPERTIES + LABELS "${upper_backend};${VTKm_UT_LABEL}" + ${EXTRA_ARGS} + RUN_SERIAL ${run_serial} + FAIL_REGULAR_EXPRESSION "runtime error") + endif() # VTKm_ENABLE_DIY_NOMPI else() # VTKm_UT_MPI add_test(NAME ${tname}${upper_backend} COMMAND ${test_prog} ${tname} ${device_command_line_argument} ${vtkm_default_test_log_level} ${VTKm_UT_TEST_ARGS} ) - if (VTKm_OVERRIDE_CTEST_TIMEOUT) - 
set_tests_properties("${tname}${upper_backend}" PROPERTIES - LABELS "${upper_backend};${VTKm_UT_LABEL}" - RUN_SERIAL ${run_serial} - FAIL_REGULAR_EXPRESSION "runtime error") - else() - set_tests_properties("${tname}${upper_backend}" PROPERTIES - LABELS "${upper_backend};${VTKm_UT_LABEL}" - TIMEOUT ${timeout} - RUN_SERIAL ${run_serial} - FAIL_REGULAR_EXPRESSION "runtime error") - endif() #VTKm_OVERRIDE_CTEST_TIMEOUT + set_tests_properties("${tname}${upper_backend}" PROPERTIES + LABELS "${upper_backend};${VTKm_UT_LABEL}" + ${EXTRA_ARGS} + RUN_SERIAL ${run_serial} + FAIL_REGULAR_EXPRESSION "runtime error") endif() # VTKm_UT_MPI endforeach() endforeach() From 841da4169e84a10b9700fd8b65a83c45521b7709 Mon Sep 17 00:00:00 2001 From: Mark Bolstad Date: Mon, 9 Jan 2023 15:05:19 -0700 Subject: [PATCH 23/29] Follow better CMake/VTK-m practices --- CMake/testing/VTKmTestWrappers.cmake | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/CMake/testing/VTKmTestWrappers.cmake b/CMake/testing/VTKmTestWrappers.cmake index 0763082c5..536f0cf3f 100644 --- a/CMake/testing/VTKmTestWrappers.cmake +++ b/CMake/testing/VTKmTestWrappers.cmake @@ -290,9 +290,9 @@ vtkm_unit_tests but not in its test dependencies. Add test dependencies to \ list(GET per_device_serial ${index} run_serial) # If set, remove the VTK-m specified timeouts for CTest - set(EXTRA_ARGS) + set(extra_args) if (NOT VTKm_OVERRIDE_CTEST_TIMEOUT) - list(APPEND EXTRA_ARGS TIMEOUT ${timeout}) + list(APPEND extra_args TIMEOUT ${timeout}) endif() foreach (test ${VTKm_UT_SOURCES} ${VTKm_UT_DEVICE_SOURCES}) @@ -306,7 +306,7 @@ vtkm_unit_tests but not in its test dependencies. Add test dependencies to \ ) set_tests_properties("${tname}${upper_backend}_mpi" PROPERTIES LABELS "${upper_backend};${VTKm_UT_LABEL}" - ${EXTRA_ARGS} + ${extra_args} RUN_SERIAL ${run_serial} FAIL_REGULAR_EXPRESSION "runtime error") endif() # VTKm_ENABLE_MPI @@ -317,7 +317,7 @@ vtkm_unit_tests but not in its test dependencies. 
Add test dependencies to \ ) set_tests_properties("${tname}${upper_backend}_nompi" PROPERTIES LABELS "${upper_backend};${VTKm_UT_LABEL}" - ${EXTRA_ARGS} + ${extra_args} RUN_SERIAL ${run_serial} FAIL_REGULAR_EXPRESSION "runtime error") @@ -329,11 +329,12 @@ vtkm_unit_tests but not in its test dependencies. Add test dependencies to \ ) set_tests_properties("${tname}${upper_backend}" PROPERTIES LABELS "${upper_backend};${VTKm_UT_LABEL}" - ${EXTRA_ARGS} + ${extra_args} RUN_SERIAL ${run_serial} FAIL_REGULAR_EXPRESSION "runtime error") endif() # VTKm_UT_MPI endforeach() + unset(extra_args) endforeach() endfunction(vtkm_unit_tests) From f275972e3b007f3340e7c6016770985c1e7cca66 Mon Sep 17 00:00:00 2001 From: Kenneth Moreland Date: Tue, 10 Jan 2023 09:16:33 -0700 Subject: [PATCH 24/29] Resolve sprintf warning The latest verson of Xcode clang warns about using sprintf because of its inherent vulnerability. Change it to snprintf. --- vtkm/filter/scalar_topology/worklet/contourtree/PrintVectors.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vtkm/filter/scalar_topology/worklet/contourtree/PrintVectors.h b/vtkm/filter/scalar_topology/worklet/contourtree/PrintVectors.h index 418b1fa3e..3c7d16db3 100644 --- a/vtkm/filter/scalar_topology/worklet/contourtree/PrintVectors.h +++ b/vtkm/filter/scalar_topology/worklet/contourtree/PrintVectors.h @@ -112,7 +112,7 @@ void PrintLabelledBlock(std::string label, inline std::string NumString(vtkm::Id number) { // NumString() char strBuf[20]; - sprintf(strBuf, "%1d", static_cast(number)); + snprintf(strBuf, 19, "%1d", static_cast(number)); return std::string(strBuf); } // NumString() From c0e0032e1d7c5a94e3ae944adc3956ac40b54a97 Mon Sep 17 00:00:00 2001 From: Kenneth Moreland Date: Mon, 9 Jan 2023 10:26:57 -0700 Subject: [PATCH 25/29] Clarify field index ordering in Doxygen The fields in a `DataSet` are indexed from `0` to `GetNumberOfFields() - 1`. 
It is natural to assume that the fields will be indexed in the order that they are added, but they are not. Rather, the indexing is arbitrary and can change any time a field is added to the dataset. To make this more clear, Doxygen documentation is added to the `DataSet` methods to inform users to not make any assumptions about the order of field indexing. --- docs/changelog/document-field-index-order.md | 10 ++ vtkm/cont/DataSet.h | 168 ++++++++++++------- 2 files changed, 121 insertions(+), 57 deletions(-) create mode 100644 docs/changelog/document-field-index-order.md diff --git a/docs/changelog/document-field-index-order.md b/docs/changelog/document-field-index-order.md new file mode 100644 index 000000000..53b00d318 --- /dev/null +++ b/docs/changelog/document-field-index-order.md @@ -0,0 +1,10 @@ +# Clarify field index ordering in Doxygen + +The fields in a `DataSet` are indexed from `0` to `GetNumberOfFields() - 1`. +It is natural to assume that the fields will be indexed in the order that +they are added, but they are not. Rather, the indexing is arbitrary and can +change any time a field is added to the dataset. + +To make this more clear, Doxygen documentation is added to the `DataSet` +methods to inform users to not make any assumptions about the order of +field indexing. diff --git a/vtkm/cont/DataSet.h b/vtkm/cont/DataSet.h index 8e4f8e30b..2cef4af8d 100644 --- a/vtkm/cont/DataSet.h +++ b/vtkm/cont/DataSet.h @@ -46,22 +46,37 @@ public: VTKM_CONT void Clear(); - /// Get the number of cells contained in this DataSet + /// \brief Get the number of cells contained in this DataSet VTKM_CONT vtkm::Id GetNumberOfCells() const; - /// Get the number of points contained in this DataSet + /// \brief Get the number of points contained in this DataSet /// /// Note: All coordinate systems for a DataSet are expected /// to have the same number of points. VTKM_CONT vtkm::Id GetNumberOfPoints() const; + /// \brief Adds a field to the `DataSet`. 
+ /// + /// Note that the indexing of fields is not the same as the order in which they are + /// added, and that adding a field can arbitrarily reorder the integer indexing of + /// all the fields. To retrieve a specific field, retrieve the field by name, not by + /// integer index. VTKM_CONT void AddField(const Field& field); + ///@{ + /// \brief Retrieves a field by index. + /// + /// Note that the indexing of fields is not the same as the order in which they are + /// added, and that adding a field can arbitrarily reorder the integer indexing of + /// all the fields. To retrieve a specific field, retrieve the field by name, not by + /// integer index. This method is most useful for iterating over all the fields of + /// a `DataSet` (indexed from `0` to `NumberOfFields() - 1`). VTKM_CONT const vtkm::cont::Field& GetField(vtkm::Id index) const { return this->Fields.GetField(index); } VTKM_CONT vtkm::cont::Field& GetField(vtkm::Id index) { return this->Fields.GetField(index); } + ///@} VTKM_CONT bool HasField(const std::string& name, @@ -89,8 +104,14 @@ public: } - /// Returns the field that matches the provided name and association - /// Will return -1 if no match is found + /// \brief Returns the field that matches the provided name and association. + /// + /// This method will return -1 if no match for the field is found. + /// + /// Note that the indexing of fields is not the same as the order in which they are + /// added, and that adding a field can arbitrarily reorder the integer indexing of + /// all the fields. To retrieve a specific field, retrieve the field by name, not by + /// integer index. VTKM_CONT vtkm::Id GetFieldIndex( const std::string& name, @@ -99,8 +120,10 @@ public: return this->Fields.GetFieldIndex(name, assoc); } - /// Returns the field that matches the provided name and association - /// Will throw an exception if no match is found + /// \brief Returns the field that matches the provided name and association. 
+ /// + /// This method will throw an exception if no match is found. Use `HasField()` to query + /// whether a particular field exists. ///@{ VTKM_CONT const vtkm::cont::Field& GetField( @@ -120,8 +143,10 @@ public: } ///@} - /// Returns the first cell field that matches the provided name. - /// Will throw an exception if no match is found + /// \brief Returns the first cell field that matches the provided name. + /// + /// This method will throw an exception if no match is found. Use `HasCellField()` to query + /// whether a particular field exists. ///@{ VTKM_CONT const vtkm::cont::Field& GetCellField(const std::string& name) const @@ -136,15 +161,19 @@ public: } ///@} - /// Returns the cell field that matches the ghost cell field name. - /// Will throw an exception if no match is found + /// \brief Returns the cell field that matches the ghost cell field name. + /// + /// This method will throw an exception if no match is found. Use `HasGhostCellField()` to query + /// whether a particular field exists. ///@{ VTKM_CONT const vtkm::cont::Field& GetGhostCellField() const; ///@} - /// Returns the first point field that matches the provided name. - /// Will throw an exception if no match is found + /// \brief Returns the first point field that matches the provided name. + /// + /// This method will throw an exception if no match is found. Use `HasPointField()` to query + /// whether a particular field exists. ///@{ VTKM_CONT const vtkm::cont::Field& GetPointField(const std::string& name) const @@ -159,6 +188,13 @@ public: } ///@} + ///@{ + /// \brief Adds a point field of a given name to the `DataSet`. + /// + /// Note that the indexing of fields is not the same as the order in which they are + /// added, and that adding a field can arbitrarily reorder the integer indexing of + /// all the fields. To retrieve a specific field, retrieve the field by name, not by + /// integer index. 
VTKM_CONT void AddPointField(const std::string& fieldName, const vtkm::cont::UnknownArrayHandle& field) { @@ -185,58 +221,21 @@ public: this->AddField( make_Field(fieldName, vtkm::cont::Field::Association::Points, field, n, vtkm::CopyFlag::On)); } + ///@} - //Cell centered field + ///@{ + /// \brief Adds a cell field of a given name to the `DataSet`. + /// + /// Note that the indexing of fields is not the same as the order in which they are + /// added, and that adding a field can arbitrarily reorder the integer indexing of + /// all the fields. To retrieve a specific field, retrieve the field by name, not by + /// integer index. VTKM_CONT void AddCellField(const std::string& fieldName, const vtkm::cont::UnknownArrayHandle& field) { this->AddField(make_FieldCell(fieldName, field)); } - /// \brief Sets the name of the field to use for cell ghost levels. - /// - /// This value can be set regardless of whether such a cell field actually exists. - VTKM_CONT void SetGhostCellFieldName(const std::string& name); - - /// \brief Sets the cell field of the given name as the cell ghost levels. - /// - /// If a cell field of the given name does not exist, an exception is thrown. - VTKM_CONT void SetGhostCellField(const std::string& name); - - ///@{ - /// \brief Sets the ghost cell levels. - /// - /// A field of the given name is added to the `DataSet`, and that field is set as the cell - /// ghost levels. - VTKM_CONT void SetGhostCellField(const vtkm::cont::Field& field); - VTKM_CONT void SetGhostCellField(const std::string& fieldName, - const vtkm::cont::UnknownArrayHandle& field); - ///@} - - /// \brief Sets the ghost cell levels to the given array. - /// - /// A field with the global ghost cell field name (see `GlobalGhostCellFieldName`) is added - /// to the `DataSet` and made to be the cell ghost levels. 
- VTKM_CONT void SetGhostCellField(const vtkm::cont::UnknownArrayHandle& field); - - VTKM_DEPRECATED(2.0, "Use SetGhostCellField.") - VTKM_CONT - void AddGhostCellField(const std::string& fieldName, const vtkm::cont::UnknownArrayHandle& field) - { - this->SetGhostCellField(fieldName, field); - } - - VTKM_DEPRECATED(2.0, "Use SetGhostCellField.") - VTKM_CONT - void AddGhostCellField(const vtkm::cont::UnknownArrayHandle& field) - { - this->SetGhostCellField(field); - } - - VTKM_DEPRECATED(2.0, "Use SetGhostCellField.") - VTKM_CONT - void AddGhostCellField(const vtkm::cont::Field& field) { this->SetGhostCellField(field); } - template VTKM_CONT void AddCellField(const std::string& fieldName, const vtkm::cont::ArrayHandle& field) @@ -257,6 +256,61 @@ public: this->AddField( make_Field(fieldName, vtkm::cont::Field::Association::Cells, field, n, vtkm::CopyFlag::On)); } + ///@} + + /// \brief Sets the name of the field to use for cell ghost levels. + /// + /// This value can be set regardless of whether such a cell field actually exists. + VTKM_CONT void SetGhostCellFieldName(const std::string& name); + + /// \brief Sets the cell field of the given name as the cell ghost levels. + /// + /// If a cell field of the given name does not exist, an exception is thrown. + VTKM_CONT void SetGhostCellField(const std::string& name); + + ///@{ + /// \brief Sets the ghost cell levels. + /// + /// A field of the given name is added to the `DataSet`, and that field is set as the cell + /// ghost levels. + /// + /// Note that the indexing of fields is not the same as the order in which they are + /// added, and that adding a field can arbitrarily reorder the integer indexing of + /// all the fields. To retrieve a specific field, retrieve the field by name, not by + /// integer index. 
+ VTKM_CONT void SetGhostCellField(const vtkm::cont::Field& field); + VTKM_CONT void SetGhostCellField(const std::string& fieldName, + const vtkm::cont::UnknownArrayHandle& field); + ///@} + + /// \brief Sets the ghost cell levels to the given array. + /// + /// A field with the global ghost cell field name (see `GlobalGhostCellFieldName`) is added + /// to the `DataSet` and made to be the cell ghost levels. + /// + /// Note that the indexing of fields is not the same as the order in which they are + /// added, and that adding a field can arbitrarily reorder the integer indexing of + /// all the fields. To retrieve a specific field, retrieve the field by name, not by + /// integer index. + VTKM_CONT void SetGhostCellField(const vtkm::cont::UnknownArrayHandle& field); + + VTKM_DEPRECATED(2.0, "Use SetGhostCellField.") + VTKM_CONT + void AddGhostCellField(const std::string& fieldName, const vtkm::cont::UnknownArrayHandle& field) + { + this->SetGhostCellField(fieldName, field); + } + + VTKM_DEPRECATED(2.0, "Use SetGhostCellField.") + VTKM_CONT + void AddGhostCellField(const vtkm::cont::UnknownArrayHandle& field) + { + this->SetGhostCellField(field); + } + + VTKM_DEPRECATED(2.0, "Use SetGhostCellField.") + VTKM_CONT + void AddGhostCellField(const vtkm::cont::Field& field) { this->SetGhostCellField(field); } /// \brief Adds the given `CoordinateSystem` to the `DataSet`. 
From 0e5aeb10fe6a8efba0873141784db6a39586374a Mon Sep 17 00:00:00 2001 From: oruebel Date: Mon, 16 Jan 2023 18:08:58 -0800 Subject: [PATCH 26/29] Update contour_tree_distributed/CMakeLists.txt --- examples/contour_tree_distributed/CMakeLists.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/contour_tree_distributed/CMakeLists.txt b/examples/contour_tree_distributed/CMakeLists.txt index 418e9d5d5..ac605eeed 100644 --- a/examples/contour_tree_distributed/CMakeLists.txt +++ b/examples/contour_tree_distributed/CMakeLists.txt @@ -59,7 +59,7 @@ find_package(VTKm REQUIRED QUIET) # MPI #################################### if (VTKm_ENABLE_MPI AND TARGET vtkm::filter_scalar_topology AND TARGET vtkm::io) - add_executable(ContourTree_Distributed ContourTreeApp.cxx, ContourTreeAppDataIO.h) + add_executable(ContourTree_Distributed ContourTreeApp.cxx ContourTreeAppDataIO.h) target_link_libraries(ContourTree_Distributed vtkm::filter_scalar_topology vtkm::io MPI::MPI_CXX) vtkm_add_target_information(ContourTree_Distributed MODIFY_CUDA_FLAGS @@ -87,7 +87,7 @@ if (VTKm_ENABLE_MPI AND TARGET vtkm::filter_scalar_topology AND TARGET vtkm::io) vtkm_add_target_information(TreeCompiler DROP_UNUSED_SYMBOLS) add_executable(BranchCompiler BranchCompilerApp.cxx) - target_link_libraries(BranchCompiler vtkm::filter) + target_link_libraries(BranchCompiler vtkm::filter_scalar_topology) vtkm_add_target_information(BranchCompiler DROP_UNUSED_SYMBOLS) configure_file(split_data_2d.py split_data_2d.py COPYONLY) From 27d3d403f6d30ea69881f49cf054fcae9e0f9ac1 Mon Sep 17 00:00:00 2001 From: oruebel Date: Mon, 16 Jan 2023 18:25:17 -0800 Subject: [PATCH 27/29] Remove bade import in streamline_mpi example --- examples/streamline_mpi/StreamlineMPI.cxx | 1 - 1 file changed, 1 deletion(-) diff --git a/examples/streamline_mpi/StreamlineMPI.cxx b/examples/streamline_mpi/StreamlineMPI.cxx index aec518cec..1de54fd24 100644 --- a/examples/streamline_mpi/StreamlineMPI.cxx +++ 
b/examples/streamline_mpi/StreamlineMPI.cxx @@ -18,7 +18,6 @@ #include #include #include -#include #include #include From 113e6be327e94f0fa65779058ce899fd05339d1e Mon Sep 17 00:00:00 2001 From: oruebel Date: Mon, 16 Jan 2023 18:25:48 -0800 Subject: [PATCH 28/29] Remove bad import in ContourTreeApp distributed --- examples/contour_tree_distributed/ContourTreeApp.cxx | 1 - 1 file changed, 1 deletion(-) diff --git a/examples/contour_tree_distributed/ContourTreeApp.cxx b/examples/contour_tree_distributed/ContourTreeApp.cxx index 8d375a295..7b836a135 100644 --- a/examples/contour_tree_distributed/ContourTreeApp.cxx +++ b/examples/contour_tree_distributed/ContourTreeApp.cxx @@ -64,7 +64,6 @@ #include #include #include -#include #include #include #include From 44c276174002cea36813c82eabad59c108e54c81 Mon Sep 17 00:00:00 2001 From: oruebel Date: Mon, 16 Jan 2023 18:26:32 -0800 Subject: [PATCH 29/29] Remove bad import and fix warning in ContourTreeAppDataIO.h --- .../ContourTreeAppDataIO.h | 35 +++++++++---------- 1 file changed, 17 insertions(+), 18 deletions(-) diff --git a/examples/contour_tree_distributed/ContourTreeAppDataIO.h b/examples/contour_tree_distributed/ContourTreeAppDataIO.h index badbe61d1..acbf32b1c 100644 --- a/examples/contour_tree_distributed/ContourTreeAppDataIO.h +++ b/examples/contour_tree_distributed/ContourTreeAppDataIO.h @@ -61,7 +61,6 @@ #include #include #include -#include #include #include #include @@ -222,24 +221,24 @@ bool read3DHDF5File(const int& mpi_rank, count[2] = count[2] + 1; } // Check that we are not running over the end of the dataset - if (offset[0] + count[0] > globalSize[0]) + if (vtkm::Id(offset[0] + count[0]) > globalSize[0]) { count[0] = globalSize[0] - offset[0]; } - if (offset[1] + count[1] > globalSize[1]) + if (vtkm::Id(offset[1] + count[1]) > globalSize[1]) { count[1] = globalSize[1] - offset[1]; } - if (offset[2] + count[2] > globalSize[2]) + if (vtkm::Id(offset[2] + count[2]) > globalSize[2]) { count[2] = globalSize[2] - 
offset[2]; } blockSize = vtkm::Id3{ static_cast(count[0]), static_cast(count[1]), static_cast(count[2]) }; - vtkm::Id3 blockOrigin = vtkm::Id3{ static_cast(offset[0]), + /*vtkm::Id3 blockOrigin = vtkm::Id3{ static_cast(offset[0]), static_cast(offset[1]), - static_cast(offset[2]) }; + static_cast(offset[2]) };*/ // Define the hyperslap to read the data into memory status = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, offset, NULL, count, NULL); @@ -254,11 +253,11 @@ bool read3DHDF5File(const int& mpi_rank, double data_out[count[0]][count[1]][count[2]]; // output buffer status = H5Dread(dataset, H5T_NATIVE_DOUBLE, memspace, dataspace, H5P_DEFAULT, data_out); // Copy data to 1D array of the expected ValueType - for (vtkm::Id k = 0; k < count[0]; k++) + for (hsize_t k = 0; k < count[0]; k++) { - for (vtkm::Id j = 0; j < count[1]; j++) + for (hsize_t j = 0; j < count[1]; j++) { - for (vtkm::Id i = 0; i < count[2]; i++) + for (hsize_t i = 0; i < count[2]; i++) { values[to1DIndex(vtkm::Id3(k, j, i), blockSize)] = ValueType(data_out[k][j][i]); } @@ -270,11 +269,11 @@ bool read3DHDF5File(const int& mpi_rank, float data_out[count[0]][count[1]][count[2]]; // output buffer status = H5Dread(dataset, H5T_NATIVE_FLOAT, memspace, dataspace, H5P_DEFAULT, data_out); // Copy data to 1D array of the expected ValueType - for (vtkm::Id k = 0; k < count[0]; k++) + for (hsize_t k = 0; k < count[0]; k++) { - for (vtkm::Id j = 0; j < count[1]; j++) + for (hsize_t j = 0; j < count[1]; j++) { - for (vtkm::Id i = 0; i < count[2]; i++) + for (hsize_t i = 0; i < count[2]; i++) { values[to1DIndex(vtkm::Id3(k, j, i), blockSize)] = ValueType(data_out[k][j][i]); } @@ -286,11 +285,11 @@ bool read3DHDF5File(const int& mpi_rank, int data_out[count[0]][count[1]][count[2]]; // output buffer status = H5Dread(dataset, H5T_NATIVE_INT, memspace, dataspace, H5P_DEFAULT, data_out); // Copy data to 1D array of the expected ValueType - for (vtkm::Id k = 0; k < count[0]; k++) + for (hsize_t k = 0; k < count[0]; 
k++) { - for (vtkm::Id j = 0; j < count[1]; j++) + for (hsize_t j = 0; j < count[1]; j++) { - for (vtkm::Id i = 0; i < count[2]; i++) + for (hsize_t i = 0; i < count[2]; i++) { values[to1DIndex(vtkm::Id3(k, j, i), blockSize)] = ValueType(data_out[k][j][i]); } @@ -302,11 +301,11 @@ bool read3DHDF5File(const int& mpi_rank, unsigned char data_out[count[0]][count[1]][count[2]]; // output buffer status = H5Dread(dataset, H5T_NATIVE_UCHAR, memspace, dataspace, H5P_DEFAULT, data_out); // Copy data to 1D array of the expected ValueType - for (vtkm::Id k = 0; k < count[0]; k++) + for (hsize_t k = 0; k < count[0]; k++) { - for (vtkm::Id j = 0; j < count[1]; j++) + for (hsize_t j = 0; j < count[1]; j++) { - for (vtkm::Id i = 0; i < count[2]; i++) + for (hsize_t i = 0; i < count[2]; i++) { values[to1DIndex(vtkm::Id3(k, j, i), blockSize)] = ValueType(data_out[k][j][i]); }