adding mpi-only filter example.

Adding a Histogram filter example that uses MPI (instead of DIY)
for distributed processing.
Utkarsh Ayachit 2018-04-10 20:11:32 -04:00
parent 4610e8b167
commit d37c1fee79
5 changed files with 492 additions and 0 deletions

examples/CMakeLists.txt
@@ -30,6 +30,7 @@ add_subdirectory(demo)
add_subdirectory(dynamic_dispatcher)
add_subdirectory(game_of_life)
add_subdirectory(hello_world)
add_subdirectory(histogram)
add_subdirectory(isosurface)
add_subdirectory(multi_backend)
add_subdirectory(particle_advection)

examples/histogram/CMakeLists.txt
@@ -0,0 +1,48 @@
##=============================================================================
##
## Copyright (c) Kitware, Inc.
## All rights reserved.
## See LICENSE.txt for details.
##
## This software is distributed WITHOUT ANY WARRANTY; without even
## the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
## PURPOSE. See the above copyright notice for more information.
##
## Copyright 2015 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
## Copyright 2015 UT-Battelle, LLC.
## Copyright 2015 Los Alamos National Security.
##
## Under the terms of Contract DE-NA0003525 with NTESS,
## the U.S. Government retains certain rights in this software.
## Under the terms of Contract DE-AC52-06NA25396 with Los Alamos National
## Laboratory (LANL), the U.S. Government retains certain rights in
## this software.
##
##=============================================================================
cmake_minimum_required(VERSION 3.3 FATAL_ERROR)
project(Histogram CXX)
#Find the VTK-m package
find_package(VTKm REQUIRED QUIET)
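# This example uses MPI directly, so it is only built when VTK-m itself was
# configured with MPI support (VTKm_ENABLE_MPI).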
if (VTKm_ENABLE_MPI)
add_executable(Histogram_SERIAL Histogram.cxx HistogramMPI.h HistogramMPI.hxx)
target_compile_definitions(Histogram_SERIAL PRIVATE
"VTKM_DEVICE_ADAPTER=VTKM_DEVICE_ADAPTER_SERIAL")
target_link_libraries(Histogram_SERIAL PRIVATE vtkm_cont)
if(TARGET vtkm::tbb)
add_executable(Histogram_TBB Histogram.cxx HistogramMPI.h HistogramMPI.hxx)
target_compile_definitions(Histogram_TBB PRIVATE
"VTKM_DEVICE_ADAPTER=VTKM_DEVICE_ADAPTER_TBB")
target_link_libraries(Histogram_TBB PRIVATE vtkm_cont)
endif()
if(TARGET vtkm::cuda)
vtkm_compile_as_cuda(cudaSource Histogram.cxx HistogramMPI.h HistogramMPI.hxx)
add_executable(Histogram_CUDA ${cudaSource})
target_compile_definitions(Histogram_CUDA PRIVATE
"VTKM_DEVICE_ADAPTER=VTKM_DEVICE_ADAPTER_CUDA")
target_link_libraries(Histogram_CUDA PRIVATE vtkm_cont)
endif()
endif()

examples/histogram/Histogram.cxx
@@ -0,0 +1,124 @@
//============================================================================
// Copyright (c) Kitware, Inc.
// All rights reserved.
// See LICENSE.txt for details.
// This software is distributed WITHOUT ANY WARRANTY; without even
// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the above copyright notice for more information.
//
// Copyright 2014 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
// Copyright 2014 UT-Battelle, LLC.
// Copyright 2014 Los Alamos National Security.
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
// Under the terms of Contract DE-AC52-06NA25396 with Los Alamos National
// Laboratory (LANL), the U.S. Government retains certain rights in
// this software.
//============================================================================
/*
* This example demonstrates how one can write a filter that uses MPI
* for hybrid parallelism. The `vtkm::filter::Histogram` filter implements the
* same operation using DIY for its cross-rank communication; this example
* does not use DIY and instead issues MPI calls directly.
*/
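//
// The MPI-specific pieces are small: main() hands the MPI communicator to
// VTK-m through vtkm::cont::EnvironmentTracker, and HistogramMPI::PostExecute()
// combines the per-rank bin counts with an MPI_Reduce onto rank 0.
//
// An illustrative invocation (the launcher and executable name depend on the
// MPI installation and on which device-specific target was built):
//   mpirun -np 4 ./Histogram_SERIAL 10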
#include "HistogramMPI.h"
#include <vtkm/cont/ArrayPortalToIterators.h>
#include <vtkm/cont/DataSetFieldAdd.h>
#include <vtkm/cont/EnvironmentTracker.h>
// clang-format off
VTKM_THIRDPARTY_PRE_INCLUDE
#include VTKM_DIY(diy/mpi.hpp)
VTKM_THIRDPARTY_POST_INCLUDE
// clang-format on
#include <mpi.h>
#include <algorithm>
#include <numeric>
#include <random>
#include <utility>
#include <vector>
namespace
{
template <typename T>
VTKM_CONT vtkm::cont::ArrayHandle<T> CreateArray(T min, T max, vtkm::Id numVals)
{
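// Note: the mt19937 engine is default-seeded, so every rank generates the
// same sequence of values.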
std::mt19937 gen;
std::uniform_real_distribution<double> dis(static_cast<double>(min), static_cast<double>(max));
vtkm::cont::ArrayHandle<T> handle;
handle.Allocate(numVals);
std::generate(vtkm::cont::ArrayPortalToIteratorBegin(handle.GetPortalControl()),
vtkm::cont::ArrayPortalToIteratorEnd(handle.GetPortalControl()),
[&]() { return static_cast<T>(dis(gen)); });
return handle;
}
}
int main(int argc, char* argv[])
{
// setup MPI environment.
MPI_Init(&argc, &argv);
// tell VTK-m the communicator to use.
vtkm::cont::EnvironmentTracker::SetCommunicator(diy::mpi::communicator(MPI_COMM_WORLD));
int rank, size;
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
if (argc != 2)
{
if (rank == 0)
{
std::cout << "Usage: " << std::endl << "$ " << argv[0] << " <num-bins>" << std::endl;
}
MPI_Finalize();
return EXIT_FAILURE;
}
const vtkm::Id num_bins = static_cast<vtkm::Id>(std::atoi(argv[1]));
const vtkm::Id numVals = 1024;
vtkm::cont::MultiBlock mb;
vtkm::cont::DataSet ds;
vtkm::cont::DataSetFieldAdd::AddPointField(ds, "pointvar", CreateArray(-1024, 1024, numVals));
mb.AddBlock(ds);
example::HistogramMPI histogram;
histogram.SetActiveField("pointvar");
histogram.SetNumberOfBins(std::max<vtkm::Id>(1, num_bins));
vtkm::cont::MultiBlock result = histogram.Execute(mb);
vtkm::cont::ArrayHandle<vtkm::Id> bins;
result.GetBlock(0).GetField("histogram").GetData().CopyTo(bins);
auto binPortal = bins.GetPortalConstControl();
if (rank == 0)
{
// print histogram.
std::cout << "Histogram (" << num_bins << ")" << std::endl;
vtkm::Id count = 0;
for (vtkm::Id cc = 0; cc < num_bins; ++cc)
{
std::cout << " bin[" << cc << "] = " << binPortal.Get(cc) << std::endl;
count += binPortal.Get(cc);
}
if (count != numVals * size)
{
std::cout << "ERROR: bins mismatched!" << std::endl;
MPI_Finalize();
return EXIT_FAILURE;
}
}
MPI_Finalize();
return EXIT_SUCCESS;
}

examples/histogram/HistogramMPI.h
@@ -0,0 +1,115 @@
//============================================================================
// Copyright (c) Kitware, Inc.
// All rights reserved.
// See LICENSE.txt for details.
// This software is distributed WITHOUT ANY WARRANTY; without even
// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the above copyright notice for more information.
//
// Copyright 2014 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
// Copyright 2014 UT-Battelle, LLC.
// Copyright 2014 Los Alamos National Security.
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
// Under the terms of Contract DE-AC52-06NA25396 with Los Alamos National
// Laboratory (LANL), the U.S. Government retains certain rights in
// this software.
//============================================================================
#ifndef vtk_m_examples_histogram_HistogramMPI_h
#define vtk_m_examples_histogram_HistogramMPI_h
#include <vtkm/filter/FilterField.h>
#include <vtkm/filter/FilterTraits.h>
namespace example
{
/// \brief Construct the histogram of a given field.
///
/// Construct a HistogramMPI filter with a default of 10 bins. Unlike
/// `vtkm::filter::Histogram`, this filter performs its cross-rank reduction
/// with direct MPI calls rather than DIY.
///
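/// A simplified sketch (not the actual VTK-m dispatch code) of what happens on
/// each rank when `Execute` is called with a vtkm::cont::MultiBlock:
///
///   PreExecute(input, policy);        // agree on a bin range across all ranks
///   for (each block in input)
///     DoExecute(block, ...);          // compute this block's local bin counts
///   PostExecute(input, output, ...);  // MPI_Reduce the bin counts onto rank 0
///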
class HistogramMPI : public vtkm::filter::FilterField<HistogramMPI>
{
public:
//Construct a HistogramMPI with a default of 10 bins
VTKM_CONT
HistogramMPI();
VTKM_CONT
void SetNumberOfBins(vtkm::Id count) { this->NumberOfBins = count; }
VTKM_CONT
vtkm::Id GetNumberOfBins() const { return this->NumberOfBins; }
//@{
/// Get/Set the range to use to generate the histogram. If the range is
/// empty, the field's global range (computed using
/// `vtkm::cont::FieldRangeGlobalCompute`) will be used.
VTKM_CONT
void SetRange(const vtkm::Range& range) { this->Range = range; }
VTKM_CONT
const vtkm::Range& GetRange() const { return this->Range; }
//@}
/// Returns the bin delta of the last computed field.
VTKM_CONT
vtkm::Float64 GetBinDelta() const { return this->BinDelta; }
/// Returns the range used for the most recent call to `Execute`. If `SetRange`
/// was used to specify a non-empty range, then this will be the same as that
/// range after the `Execute` call.
VTKM_CONT
vtkm::Range GetComputedRange() const { return this->ComputedRange; }
template <typename T, typename StorageType, typename DerivedPolicy, typename DeviceAdapter>
VTKM_CONT vtkm::cont::DataSet DoExecute(const vtkm::cont::DataSet& input,
const vtkm::cont::ArrayHandle<T, StorageType>& field,
const vtkm::filter::FieldMetadata& fieldMeta,
const vtkm::filter::PolicyBase<DerivedPolicy>& policy,
const DeviceAdapter& tag);
//@{
/// When operating on a vtkm::cont::MultiBlock, we want to perform the
/// processing across MPI ranks as well. Overriding these pre- and post-execute
/// hooks is all that is needed to accomplish that.
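///
/// PreExecute determines the bin range to use, computing the field's global
/// range across all ranks when no range was explicitly set. PostExecute
/// reduces the per-block bin counts onto rank 0 with MPI_Reduce; every other
/// rank receives an empty histogram array.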
template <typename DerivedPolicy>
VTKM_CONT void PreExecute(const vtkm::cont::MultiBlock& input,
const vtkm::filter::PolicyBase<DerivedPolicy>& policy);
template <typename DerivedPolicy>
VTKM_CONT void PostExecute(const vtkm::cont::MultiBlock& input,
vtkm::cont::MultiBlock& output,
const vtkm::filter::PolicyBase<DerivedPolicy>&);
//@}
private:
vtkm::Id NumberOfBins;
vtkm::Float64 BinDelta;
vtkm::Range ComputedRange;
vtkm::Range Range;
};
} // namespace example
namespace vtkm
{
namespace filter
{
template <>
class FilterTraits<example::HistogramMPI>
{ //Currently the HistogramMPI filter only works on scalar data.
//This mainly has to do with getting the ranges for each bin;
//supporting vector fields would require returning a more complex value type.
public:
using InputFieldTypeList = vtkm::TypeListTagScalarAll;
};
}
} // namespace vtkm::filter
#include "HistogramMPI.hxx"
#endif // vtk_m_examples_histogram_HistogramMPI_h

examples/histogram/HistogramMPI.hxx
@@ -0,0 +1,204 @@
//============================================================================
// Copyright (c) Kitware, Inc.
// All rights reserved.
// See LICENSE.txt for details.
// This software is distributed WITHOUT ANY WARRANTY; without even
// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the above copyright notice for more information.
//
// Copyright 2014 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
// Copyright 2014 UT-Battelle, LLC.
// Copyright 2014 Los Alamos National Security.
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
// Under the terms of Contract DE-AC52-06NA25396 with Los Alamos National
// Laboratory (LANL), the U.S. Government retains certain rights in
// this software.
//============================================================================
#include <vtkm/worklet/DispatcherMapField.h>
#include <vtkm/worklet/FieldHistogram.h>
#include <vtkm/cont/Algorithm.h>
#include <vtkm/cont/ArrayPortalToIterators.h>
#include <vtkm/cont/AssignerMultiBlock.h>
#include <vtkm/cont/EnvironmentTracker.h>
#include <vtkm/cont/ErrorFilterExecution.h>
#include <vtkm/cont/FieldRangeGlobalCompute.h>
// clang-format off
VTKM_THIRDPARTY_PRE_INCLUDE
#include VTKM_DIY(diy/mpi.hpp)
VTKM_THIRDPARTY_POST_INCLUDE
// clang-format on
namespace example
{
namespace detail
{
class DistributedHistogram
{
std::vector<vtkm::cont::ArrayHandle<vtkm::Id>> LocalBlocks;
public:
DistributedHistogram(vtkm::Id numLocalBlocks)
: LocalBlocks(static_cast<size_t>(numLocalBlocks))
{
}
void SetLocalHistogram(vtkm::Id index, const vtkm::cont::ArrayHandle<vtkm::Id>& bins)
{
this->LocalBlocks[static_cast<size_t>(index)] = bins;
}
void SetLocalHistogram(vtkm::Id index, const vtkm::cont::Field& field)
{
this->SetLocalHistogram(index, field.GetData().Cast<vtkm::cont::ArrayHandle<vtkm::Id>>());
}
vtkm::cont::ArrayHandle<vtkm::Id> ReduceAll(const vtkm::Id numBins) const
{
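// Reduction strategy: first sum the histograms of all blocks local to this
// rank, then MPI_Reduce the per-rank totals onto rank 0. Ranks other than 0
// return an empty array.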
const vtkm::Id numLocalBlocks = static_cast<vtkm::Id>(this->LocalBlocks.size());
auto comm = vtkm::cont::EnvironmentTracker::GetCommunicator();
if (comm.size() == 1 && numLocalBlocks <= 1)
{
// no reduction necessary.
return numLocalBlocks == 0 ? vtkm::cont::ArrayHandle<vtkm::Id>() : this->LocalBlocks[0];
}
// reduce local bins first.
vtkm::cont::ArrayHandle<vtkm::Id> local;
local.Allocate(numBins);
std::fill(vtkm::cont::ArrayPortalToIteratorBegin(local.GetPortalControl()),
vtkm::cont::ArrayPortalToIteratorEnd(local.GetPortalControl()),
static_cast<vtkm::Id>(0));
for (const auto& lbins : this->LocalBlocks)
{
vtkm::cont::Algorithm::Transform(local, lbins, local, vtkm::Add());
}
// now reduce across ranks using MPI: copy the local bins into a std::vector
// that can be handed to MPI_Reduce.
std::vector<vtkm::Id> send_buf(numBins);
std::copy(vtkm::cont::ArrayPortalToIteratorBegin(local.GetPortalConstControl()),
vtkm::cont::ArrayPortalToIteratorEnd(local.GetPortalConstControl()),
send_buf.begin());
std::vector<vtkm::Id> recv_buf(numBins);
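// vtkm::Id is a 32-bit or 64-bit signed integer depending on how VTK-m was
// configured, so pick the matching MPI datatype for the reduction.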
MPI_Reduce(&send_buf[0],
&recv_buf[0],
static_cast<int>(numBins),
sizeof(vtkm::Id) == 4 ? MPI_INT : MPI_LONG,
MPI_SUM,
0,
comm);
if (comm.rank() == 0)
{
local.Allocate(numBins);
std::copy(recv_buf.begin(),
recv_buf.end(),
vtkm::cont::ArrayPortalToIteratorBegin(local.GetPortalControl()));
return local;
}
return vtkm::cont::ArrayHandle<vtkm::Id>();
}
};
} // namespace detail
//-----------------------------------------------------------------------------
inline VTKM_CONT HistogramMPI::HistogramMPI()
: NumberOfBins(10)
, BinDelta(0)
, ComputedRange()
, Range()
{
this->SetOutputFieldName("histogram");
}
//-----------------------------------------------------------------------------
template <typename T, typename StorageType, typename DerivedPolicy, typename DeviceAdapter>
inline VTKM_CONT vtkm::cont::DataSet HistogramMPI::DoExecute(
const vtkm::cont::DataSet&,
const vtkm::cont::ArrayHandle<T, StorageType>& field,
const vtkm::filter::FieldMetadata&,
const vtkm::filter::PolicyBase<DerivedPolicy>&,
const DeviceAdapter& device)
{
vtkm::cont::ArrayHandle<vtkm::Id> binArray;
T delta;
vtkm::worklet::FieldHistogram worklet;
if (this->ComputedRange.IsNonEmpty())
{
worklet.Run(field,
this->NumberOfBins,
static_cast<T>(this->ComputedRange.Min),
static_cast<T>(this->ComputedRange.Max),
delta,
binArray,
device);
}
else
{
worklet.Run(field, this->NumberOfBins, this->ComputedRange, delta, binArray, device);
}
this->BinDelta = static_cast<vtkm::Float64>(delta);
vtkm::cont::DataSet output;
vtkm::cont::Field rfield(
this->GetOutputFieldName(), vtkm::cont::Field::ASSOC_WHOLE_MESH, binArray);
output.AddField(rfield);
return output;
}
//-----------------------------------------------------------------------------
template <typename DerivedPolicy>
inline VTKM_CONT void HistogramMPI::PreExecute(const vtkm::cont::MultiBlock& input,
const vtkm::filter::PolicyBase<DerivedPolicy>&)
{
if (this->Range.IsNonEmpty())
{
this->ComputedRange = this->Range;
}
else
{
auto handle = vtkm::cont::FieldRangeGlobalCompute(
input, this->GetActiveFieldName(), this->GetActiveFieldAssociation());
if (handle.GetNumberOfValues() != 1)
{
throw vtkm::cont::ErrorFilterExecution("expecting scalar field.");
}
this->ComputedRange = handle.GetPortalConstControl().Get(0);
}
}
//-----------------------------------------------------------------------------
template <typename DerivedPolicy>
inline VTKM_CONT void HistogramMPI::PostExecute(const vtkm::cont::MultiBlock&,
vtkm::cont::MultiBlock& result,
const vtkm::filter::PolicyBase<DerivedPolicy>&)
{
// Gather the per-block histograms computed by DoExecute and reduce them
// across all local blocks and all ranks.
detail::DistributedHistogram helper(result.GetNumberOfBlocks());
for (vtkm::Id cc = 0; cc < result.GetNumberOfBlocks(); ++cc)
{
auto& ablock = result.GetBlock(cc);
helper.SetLocalHistogram(cc, ablock.GetField(this->GetOutputFieldName()));
}
vtkm::cont::DataSet output;
vtkm::cont::Field rfield(this->GetOutputFieldName(),
vtkm::cont::Field::ASSOC_WHOLE_MESH,
helper.ReduceAll(this->NumberOfBins));
output.AddField(rfield);
result = vtkm::cont::MultiBlock(output);
}
} // namespace example