Begin migrating vtkh image compositor over

Dave Pugmire 2023-01-19 15:20:27 -05:00
parent 1037b80bb5
commit 4522f91807
27 changed files with 4036 additions and 1 deletion

@@ -8,6 +8,8 @@
// PURPOSE. See the above copyright notice for more information.
//============================================================================
#include <vtkm/io/VTKDataSetWriter.h>
#include <vtkm/cont/Initialize.h>
#include <vtkm/source/Tangle.h>
@@ -39,6 +41,9 @@ int main(int argc, char* argv[])
vtkm::cont::DataSet tangleData = tangle.Execute();
std::string fieldName = "tangle";
vtkm::io::VTKDataSetWriter writer("tangle.vtk");
writer.WriteDataSet(tangleData);
// Set up a camera for rendering the input data
vtkm::rendering::Camera camera;
camera.SetLookAt(vtkm::Vec3f_32(0.5, 0.5, 0.5));
@@ -57,7 +62,7 @@ int main(int argc, char* argv[])
vtkm::rendering::Scene scene;
scene.AddActor(actor);
// 2048x2048 pixels in the canvas:
CanvasRayTracer canvas(2048, 2048);
CanvasRayTracer canvas(512, 512);
// Create a view and use it to render the input data using OS Mesa
vtkm::rendering::View3D view(scene, MapperVolume(), canvas, camera, bg);

@@ -55,6 +55,10 @@ set(headers
View3D.h
Wireframer.h
WorldAnnotator.h
compositing/Compositor.h
compositing/Image.h
compositing/PNGEncoder.h
)
set(sources
@@ -86,6 +90,13 @@ set(sources
raytracing/Logger.cxx
raytracing/MeshConnectivityContainers.cxx
raytracing/TriangleExtractor.cxx
compositing/Compositor.cxx
compositing/Image.cxx
compositing/PNGEncoder.cxx
compositing/RadixKCompositor.cxx
compositing/PayloadCompositor.cxx
compositing/PayloadImage.cxx
)
# This list of sources has code that uses devices and so might need to be
@@ -147,6 +158,10 @@ if(UNIX AND NOT APPLE)
target_link_libraries(vtkm_rendering PRIVATE rt)
endif()
if (VTKm_ENABLE_MPI)
target_link_libraries(vtkm_rendering PUBLIC MPI::MPI_CXX)
endif()
#-----------------------------------------------------------------------------
add_subdirectory(internal)
add_subdirectory(raytracing)

@@ -0,0 +1,208 @@
//============================================================================
// Copyright (c) Kitware, Inc.
// All rights reserved.
// See LICENSE.txt for details.
//
// This software is distributed WITHOUT ANY WARRANTY; without even
// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the above copyright notice for more information.
//============================================================================
#include <vtkm/cont/EnvironmentTracker.h>
#include <vtkm/cont/ErrorBadValue.h>
#include <algorithm>
#include <cassert>
#include <vtkm/rendering/compositing/Compositor.h>
#include <vtkm/rendering/compositing/ImageCompositor.h>
#ifdef VTKM_ENABLE_MPI
#include <mpi.h>
#include <vtkm/thirdparty/diy/mpi-cast.h>
//#include <vtkh/compositing/DirectSendCompositor.hpp>
#include <vtkm/rendering/compositing/RadixKCompositor.h>
#endif
namespace vtkm
{
namespace rendering
{
namespace compositing
{
Compositor::Compositor()
: m_composite_mode(Z_BUFFER_SURFACE)
{
}
Compositor::~Compositor() {}
void Compositor::SetCompositeMode(CompositeMode composite_mode)
{
// ensure we do not mix image types
assert(m_images.size() == 0);
m_composite_mode = composite_mode;
}
void Compositor::ClearImages()
{
m_images.clear();
}
void Compositor::AddImage(const unsigned char* color_buffer,
const float* depth_buffer,
const int width,
const int height)
{
assert(m_composite_mode != VIS_ORDER_BLEND);
assert(depth_buffer != NULL);
Image image;
if (m_images.size() == 0)
{
m_images.push_back(image);
m_images[0].Init(color_buffer, depth_buffer, width, height);
//m_images[0].Save("first.png");
}
else if (m_composite_mode == Z_BUFFER_SURFACE)
{
//
// Do local composite and keep a single image
//
image.Init(color_buffer, depth_buffer, width, height);
vtkm::rendering::compositing::ImageCompositor compositor;
compositor.ZBufferComposite(m_images[0], image);
}
else
{
const size_t image_index = m_images.size();
m_images.push_back(image);
m_images[image_index].Init(color_buffer, depth_buffer, width, height);
}
}
void Compositor::AddImage(const float* color_buffer,
const float* depth_buffer,
const int width,
const int height)
{
assert(m_composite_mode != VIS_ORDER_BLEND);
assert(depth_buffer != NULL);
Image image;
if (m_images.size() == 0)
{
m_images.push_back(image);
m_images[0].Init(color_buffer, depth_buffer, width, height);
}
else if (m_composite_mode == Z_BUFFER_SURFACE)
{
//
// Do local composite and keep a single image
//
image.Init(color_buffer, depth_buffer, width, height);
vtkm::rendering::compositing::ImageCompositor compositor;
compositor.ZBufferComposite(m_images[0], image);
}
else
{
const size_t image_index = m_images.size();
m_images.push_back(image);
m_images[image_index].Init(color_buffer, depth_buffer, width, height);
}
}
void Compositor::AddImage(const unsigned char* color_buffer,
const float* depth_buffer,
const int width,
const int height,
const int vis_order)
{
assert(m_composite_mode == VIS_ORDER_BLEND);
Image image;
const size_t image_index = m_images.size();
m_images.push_back(image);
m_images[image_index].Init(color_buffer, depth_buffer, width, height, vis_order);
}
void Compositor::AddImage(const float* color_buffer,
const float* depth_buffer,
const int width,
const int height,
const int vis_order)
{
assert(m_composite_mode == VIS_ORDER_BLEND);
Image image;
const size_t image_index = m_images.size();
m_images.push_back(image);
m_images[image_index].Init(color_buffer, depth_buffer, width, height, vis_order);
}
Image Compositor::Composite()
{
assert(m_images.size() != 0);
if (m_composite_mode == Z_BUFFER_SURFACE)
{
CompositeZBufferSurface();
}
else if (m_composite_mode == Z_BUFFER_BLEND)
{
CompositeZBufferBlend();
}
else if (m_composite_mode == VIS_ORDER_BLEND)
{
CompositeVisOrder();
}
// Make this a param to avoid the copy?
return m_images[0];
}
void Compositor::Cleanup() {}
std::string Compositor::GetLogString()
{
std::string res = m_log_stream.str();
m_log_stream.str("");
return res;
}
void Compositor::CompositeZBufferSurface()
{
// nothing to do here in serial. Images were composited as
// they were added to the compositor
#ifdef VTKM_ENABLE_MPI
auto comm = vtkm::cont::EnvironmentTracker::GetCommunicator();
assert(m_images.size() == 1);
RadixKCompositor compositor;
compositor.CompositeSurface(comm, this->m_images[0]);
m_log_stream << compositor.GetTimingString();
#endif
}
void Compositor::CompositeZBufferBlend()
{
throw vtkm::cont::ErrorBadValue("Not implemented");
}
void Compositor::CompositeVisOrder()
{
#ifdef VTKM_ENABLE_MPI
/*
vtkhdiy::mpi::communicator diy_comm;
diy_comm = vtkhdiy::mpi::communicator(MPI_Comm_f2c(GetMPICommHandle()));
assert(m_images.size() != 0);
DirectSendCompositor compositor;
compositor.CompositeVolume(diy_comm, this->m_images);
*/
#else
vtkm::rendering::compositing::ImageCompositor compositor;
compositor.OrderedComposite(m_images);
#endif
}
}
}
} //namespace vtkm::rendering::compositing

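For orientation, a minimal driver for this API might look as follows. This is a sketch only; the colors/depths/width/height buffers are assumed to come from a locally rendered canvas and are not part of this commit:

vtkm::rendering::compositing::Compositor compositor;
compositor.SetCompositeMode(vtkm::rendering::compositing::Compositor::Z_BUFFER_SURFACE);
// every rank contributes its local image (rgba bytes + float depths)
compositor.AddImage(colors, depths, width, height);
// with MPI enabled, ranks reduce via radix-k; rank 0 ends up with the result
vtkm::rendering::compositing::Image result = compositor.Composite();
result.Save("composite.png", std::vector<std::string>());
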
@@ -0,0 +1,104 @@
//============================================================================
// Copyright (c) Kitware, Inc.
// All rights reserved.
// See LICENSE.txt for details.
//
// This software is distributed WITHOUT ANY WARRANTY; without even
// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the above copyright notice for more information.
//============================================================================
#ifndef vtk_m_rendering_compositing_Compositor_h
#define vtk_m_rendering_compositing_Compositor_h
#include <vtkm/rendering/vtkm_rendering_export.h>
#include <vtkm/rendering/compositing/Image.h>
#ifdef VTKM_ENABLE_MPI
#include <mpi.h>
#endif
namespace vtkm
{
namespace rendering
{
namespace compositing
{
class VTKM_RENDERING_EXPORT Compositor
{
public:
enum CompositeMode
{
Z_BUFFER_SURFACE, // zbuffer composite no transparency
Z_BUFFER_BLEND, // zbuffer composite with transparency
VIS_ORDER_BLEND // blend images in a specific order
};
Compositor();
virtual ~Compositor();
void SetCompositeMode(CompositeMode composite_mode);
void ClearImages();
void AddImage(const unsigned char* color_buffer,
const float* depth_buffer,
const int width,
const int height);
void AddImage(const float* color_buffer,
const float* depth_buffer,
const int width,
const int height);
void AddImage(const unsigned char* color_buffer,
const float* depth_buffer,
const int width,
const int height,
const int vis_order);
void AddImage(const float* color_buffer,
const float* depth_buffer,
const int width,
const int height,
const int vis_order);
Image Composite();
virtual void Cleanup();
std::string GetLogString();
unsigned char* ConvertBuffer(const float* buffer, const int size)
{
unsigned char* ubytes = new unsigned char[size];
#ifdef VTKH_OPENMP_ENABLED
#pragma omp parallel for
#endif
for (int i = 0; i < size; ++i)
{
ubytes[i] = static_cast<unsigned char>(buffer[i] * 255.f);
}
return ubytes;
}
protected:
virtual void CompositeZBufferSurface();
virtual void CompositeZBufferBlend();
virtual void CompositeVisOrder();
std::stringstream m_log_stream;
CompositeMode m_composite_mode;
std::vector<Image> m_images;
};
}
}
} //namespace vtkm::rendering::compositing
#endif //vtk_m_rendering_compositing_Compositor_h

@@ -0,0 +1,173 @@
#include <vtkh/compositing/DirectSendCompositor.hpp>
#include <vtkh/compositing/ImageCompositor.hpp>
#include <vtkh/compositing/MPICollect.hpp>
#include <vtkh/compositing/vtkh_diy_collect.hpp>
#include <vtkh/compositing/vtkh_diy_utils.hpp>
#include <diy/master.hpp>
#include <diy/mpi.hpp>
#include <diy/partners/swap.hpp>
#include <diy/reduce-operations.hpp>
#include <diy/reduce.hpp>
namespace vtkh
{
struct Redistribute
{
typedef vtkhdiy::RegularDecomposer<vtkhdiy::DiscreteBounds> Decomposer;
const vtkhdiy::RegularDecomposer<vtkhdiy::DiscreteBounds>& m_decomposer;
Redistribute(const Decomposer& decomposer)
: m_decomposer(decomposer)
{
}
void operator()(void* v_block, const vtkhdiy::ReduceProxy& proxy) const
{
MultiImageBlock* block = static_cast<MultiImageBlock*>(v_block);
//
// In the first round there is no incoming data. Take the images we have,
// chop them into pieces, and send each piece to the rank responsible
// for that portion.
//
const int world_size = m_decomposer.nblocks;
const int local_images = block->m_images.size();
if (proxy.in_link().size() == 0)
{
std::map<vtkhdiy::BlockID, std::vector<Image>> outgoing;
for (int i = 0; i < world_size; ++i)
{
vtkhdiy::DiscreteBounds sub_image_bounds;
m_decomposer.fill_bounds(sub_image_bounds, i);
vtkm::Bounds vtkm_sub_bounds = DIYBoundsToVTKM(sub_image_bounds);
vtkhdiy::BlockID dest = proxy.out_link().target(i);
outgoing[dest].resize(local_images);
for (int img = 0; img < local_images; ++img)
{
outgoing[dest][img].SubsetFrom(block->m_images[img], vtkm_sub_bounds);
}
} //for
typename std::map<vtkhdiy::BlockID, std::vector<Image>>::iterator it;
for (it = outgoing.begin(); it != outgoing.end(); ++it)
{
proxy.enqueue(it->first, it->second);
}
} // if
else if (block->m_images.at(0).m_composite_order != -1)
{
// blend images according to vis order
std::vector<Image> images;
for (int i = 0; i < proxy.in_link().size(); ++i)
{
std::vector<Image> incoming;
int gid = proxy.in_link().target(i).gid;
proxy.dequeue(gid, incoming);
const int in_size = incoming.size();
for (int img = 0; img < in_size; ++img)
{
images.emplace_back(incoming[img]);
//std::cout<<"rank "<<rank<<" rec "<<incoming[img].ToString()<<"\n";
}
} // for
ImageCompositor compositor;
compositor.OrderedComposite(images);
block->m_output.Swap(images[0]);
} // else if
else if (block->m_images.at(0).m_composite_order == -1 &&
block->m_images.at(0).HasTransparency())
{
std::vector<Image> images;
for (int i = 0; i < proxy.in_link().size(); ++i)
{
std::vector<Image> incoming;
int gid = proxy.in_link().target(i).gid;
proxy.dequeue(gid, incoming);
const int in_size = incoming.size();
for (int img = 0; img < in_size; ++img)
{
images.emplace_back(incoming[img]);
//std::cout<<"rank "<<rank<<" rec "<<incoming[img].ToString()<<"\n";
}
} // for
//
// we have images with a depth buffer and transparency
//
ImageCompositor compositor;
compositor.ZBufferBlend(images);
}
} // operator
};
DirectSendCompositor::DirectSendCompositor() {}
DirectSendCompositor::~DirectSendCompositor() {}
void DirectSendCompositor::CompositeVolume(vtkhdiy::mpi::communicator& diy_comm,
std::vector<Image>& images)
{
vtkhdiy::DiscreteBounds global_bounds = VTKMBoundsToDIY(images.at(0).m_orig_bounds);
const int num_threads = 1;
const int num_blocks = diy_comm.size();
const int magic_k = 8;
Image sub_image;
//
// DIY does not seem to like being called with different block types
// so we isolate them within separate blocks
//
{
vtkhdiy::Master master(diy_comm, num_threads, -1, 0, [](void* b) {
ImageBlock<Image>* block = reinterpret_cast<ImageBlock<Image>*>(b);
delete block;
});
// create an assigner with one block per rank
vtkhdiy::ContiguousAssigner assigner(num_blocks, num_blocks);
AddMultiImageBlock create(master, images, sub_image);
const int dims = 2;
vtkhdiy::RegularDecomposer<vtkhdiy::DiscreteBounds> decomposer(dims, global_bounds, num_blocks);
decomposer.decompose(diy_comm.rank(), assigner, create);
vtkhdiy::all_to_all(master, assigner, Redistribute(decomposer), magic_k);
}
{
vtkhdiy::Master master(diy_comm, num_threads, -1, 0, [](void* b) {
ImageBlock<Image>* block = reinterpret_cast<ImageBlock<Image>*>(b);
delete block;
});
vtkhdiy::ContiguousAssigner assigner(num_blocks, num_blocks);
const int dims = 2;
vtkhdiy::RegularDecomposer<vtkhdiy::DiscreteBounds> decomposer(dims, global_bounds, num_blocks);
AddImageBlock<Image> all_create(master, sub_image);
decomposer.decompose(diy_comm.rank(), assigner, all_create);
MPI_Barrier(diy_comm);
//MPICollect(sub_image,diy_comm);
vtkhdiy::all_to_all(master, assigner, CollectImages<Image>(decomposer), magic_k);
}
images.at(0).Swap(sub_image);
}
std::string DirectSendCompositor::GetTimingString()
{
std::string res(m_timing_log.str());
m_timing_log.str("");
return res;
}
}

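To make the direct-send pattern above concrete: with 4 ranks and a 512x512 canvas, the RegularDecomposer produces 4 strips of 512x128; each rank sends every strip of each local image straight to the strip's owner, owners blend their incoming pieces, and the final all_to_all gathers the strips back together. A back-of-the-envelope sketch of per-rank traffic in the first round, under those assumed numbers:

// Hypothetical sizes, for intuition only (rgba8 color + float32 depth).
const int ranks = 4;
const int width = 512, height = 512;
const int strip_pixels = (width * height) / ranks; // 65536 pixels per strip
const int bytes_per_pixel = 4 + 4;
// each rank keeps its own strip and sends the other ranks-1 strips:
const int bytes_sent = (ranks - 1) * strip_pixels * bytes_per_pixel; // ~1.5 MB
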
@@ -0,0 +1,24 @@
#ifndef VTKH_DIY_DIRECT_SEND_HPP
#define VTKH_DIY_DIRECT_SEND_HPP
#include <diy/mpi.hpp>
#include <sstream>
#include <vtkh/compositing/Image.hpp>
namespace vtkh
{
class DirectSendCompositor
{
public:
DirectSendCompositor();
~DirectSendCompositor();
void CompositeVolume(vtkhdiy::mpi::communicator& diy_comm, std::vector<Image>& images);
std::string GetTimingString();
private:
std::stringstream m_timing_log;
};
} // namespace vtkh
#endif

@@ -0,0 +1,35 @@
// See License.txt
#include <vtkm/rendering/compositing/Image.h>
#include <vtkm/rendering/compositing/PNGEncoder.h>
namespace vtkm
{
namespace rendering
{
namespace compositing
{
void Image::Save(const std::string& name, const std::vector<std::string>& comments)
{
PNGEncoder encoder;
encoder.Encode(&m_pixels[0],
m_bounds.X.Max - m_bounds.X.Min + 1,
m_bounds.Y.Max - m_bounds.Y.Min + 1,
comments);
encoder.Save(name);
}
void Image::Save(const std::string& name, const std::vector<std::string>& comments) const
{
PNGEncoder encoder;
encoder.Encode(&m_pixels[0],
m_bounds.X.Max - m_bounds.X.Min + 1,
m_bounds.Y.Max - m_bounds.Y.Min + 1,
comments);
encoder.Save(name);
}
}
}
} //namespace vtkm::rendering::compositing

@@ -0,0 +1,333 @@
//============================================================================
// Copyright (c) Kitware, Inc.
// All rights reserved.
// See LICENSE.txt for details.
//
// This software is distributed WITHOUT ANY WARRANTY; without even
// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the above copyright notice for more information.
//============================================================================
#ifndef vtk_m_rendering_compositing_Image_h
#define vtk_m_rendering_compositing_Image_h
#include <vtkm/rendering/vtkm_rendering_export.h>
#include <algorithm>
#include <cassert>
#include <sstream>
#include <vector>
#include <vtkm/Bounds.h>
namespace vtkm
{
namespace rendering
{
namespace compositing
{
struct VTKM_RENDERING_EXPORT Image
{
// Image bounds are expressed on a 1-based grid: X spans 1..width and
// Y spans 1..height, so the actual width is
// m_bounds.X.Max - m_bounds.X.Min + 1
// (e.g., 1024 - 1 + 1 = 1024).
vtkm::Bounds m_orig_bounds;
vtkm::Bounds m_bounds;
std::vector<unsigned char> m_pixels;
std::vector<float> m_depths;
int m_orig_rank;
bool m_has_transparency;
int m_composite_order;
Image()
: m_orig_rank(-1)
, m_has_transparency(false)
, m_composite_order(-1)
{
}
Image(const vtkm::Bounds& bounds)
: m_orig_bounds(bounds)
, m_bounds(bounds)
, m_orig_rank(-1)
, m_has_transparency(false)
, m_composite_order(-1)
{
const int dx = bounds.X.Max - bounds.X.Min + 1;
const int dy = bounds.Y.Max - bounds.Y.Min + 1;
m_pixels.resize(dx * dy * 4);
m_depths.resize(dx * dy);
}
// init this image based on the original bounds
// of the other image
void InitOriginal(const Image& other)
{
m_orig_bounds = other.m_orig_bounds;
m_bounds = other.m_orig_bounds;
const int dx = m_bounds.X.Max - m_bounds.X.Min + 1;
const int dy = m_bounds.Y.Max - m_bounds.Y.Min + 1;
m_pixels.resize(dx * dy * 4);
m_depths.resize(dx * dy);
m_orig_rank = -1;
m_has_transparency = false;
m_composite_order = -1;
}
int GetNumberOfPixels() const { return static_cast<int>(m_pixels.size() / 4); }
void SetHasTransparency(bool has_transparency) { m_has_transparency = has_transparency; }
bool HasTransparency() { return m_has_transparency; }
void Init(const float* color_buffer,
const float* depth_buffer,
int width,
int height,
int composite_order = -1)
{
m_composite_order = composite_order;
m_bounds.X.Min = 1;
m_bounds.Y.Min = 1;
m_bounds.X.Max = width;
m_bounds.Y.Max = height;
m_orig_bounds = m_bounds;
const int size = width * height;
m_pixels.resize(size * 4);
m_depths.resize(size);
#ifdef VTKH_OPENMP_ENABLED
#pragma omp parallel for
#endif
for (int i = 0; i < size; ++i)
{
const int offset = i * 4;
m_pixels[offset + 0] = static_cast<unsigned char>(color_buffer[offset + 0] * 255.f);
m_pixels[offset + 1] = static_cast<unsigned char>(color_buffer[offset + 1] * 255.f);
m_pixels[offset + 2] = static_cast<unsigned char>(color_buffer[offset + 2] * 255.f);
m_pixels[offset + 3] = static_cast<unsigned char>(color_buffer[offset + 3] * 255.f);
float depth = depth_buffer[i];
//make sure we can do a single comparison on depth
//deal with negative depth values
//TODO: This may not be the best way
depth = depth < 0.f ? -depth : depth;
m_depths[i] = depth;
}
}
void Init(const unsigned char* color_buffer,
const float* depth_buffer,
int width,
int height,
int composite_order = -1)
{
m_composite_order = composite_order;
m_bounds.X.Min = 1;
m_bounds.Y.Min = 1;
m_bounds.X.Max = width;
m_bounds.Y.Max = height;
m_orig_bounds = m_bounds;
const int size = width * height;
m_pixels.resize(size * 4);
m_depths.resize(size);
std::copy(color_buffer, color_buffer + size * 4, &m_pixels[0]);
#ifdef VTKH_OPENMP_ENABLED
#pragma omp parallel for
#endif
for (int i = 0; i < size; ++i)
{
float depth = depth_buffer[i];
//make sure we can do a single comparison on depth
depth = depth < 0 ? 2.f : depth;
m_depths[i] = depth;
} // for
}
void CompositeBackground(const float* color)
{
const int size = static_cast<int>(m_pixels.size() / 4);
unsigned char bg_color[4];
for (int i = 0; i < 4; ++i)
{
bg_color[i] = static_cast<unsigned char>(color[i] * 255.f);
}
#ifdef VTKH_OPENMP_ENABLED
#pragma omp parallel for
#endif
for (int i = 0; i < size; ++i)
{
const int offset = i * 4;
unsigned int alpha = static_cast<unsigned int>(m_pixels[offset + 3]);
const float opacity = (255 - alpha);
m_pixels[offset + 0] += static_cast<unsigned char>(opacity * bg_color[0] / 255);
m_pixels[offset + 1] += static_cast<unsigned char>(opacity * bg_color[1] / 255);
m_pixels[offset + 2] += static_cast<unsigned char>(opacity * bg_color[2] / 255);
m_pixels[offset + 3] += static_cast<unsigned char>(opacity * bg_color[3] / 255);
}
}
//
// Fill this image with a sub-region of another image
//
void SubsetFrom(const Image& image, const vtkm::Bounds& sub_region)
{
m_orig_bounds = image.m_orig_bounds;
m_bounds = sub_region;
m_orig_rank = image.m_orig_rank;
m_composite_order = image.m_composite_order;
assert(sub_region.X.Min >= image.m_bounds.X.Min);
assert(sub_region.Y.Min >= image.m_bounds.Y.Min);
assert(sub_region.X.Max <= image.m_bounds.X.Max);
assert(sub_region.Y.Max <= image.m_bounds.Y.Max);
const int s_dx = m_bounds.X.Max - m_bounds.X.Min + 1;
const int s_dy = m_bounds.Y.Max - m_bounds.Y.Min + 1;
const int dx = image.m_bounds.X.Max - image.m_bounds.X.Min + 1;
//const int dy = image.m_bounds.Y.Max - image.m_bounds.Y.Min + 1;
const int start_x = m_bounds.X.Min - image.m_bounds.X.Min;
const int start_y = m_bounds.Y.Min - image.m_bounds.Y.Min;
const int end_y = start_y + s_dy;
m_pixels.resize(s_dx * s_dy * 4);
m_depths.resize(s_dx * s_dy);
#ifdef VTKH_OPENMP_ENABLED
#pragma omp parallel for
#endif
for (int y = start_y; y < end_y; ++y)
{
const int copy_to = (y - start_y) * s_dx;
const int copy_from = y * dx + start_x;
std::copy(&image.m_pixels[copy_from * 4],
&image.m_pixels[copy_from * 4] + s_dx * 4,
&m_pixels[copy_to * 4]);
std::copy(&image.m_depths[copy_from], &image.m_depths[copy_from] + s_dx, &m_depths[copy_to]);
}
}
void Color(int color)
{
unsigned char c[4];
c[3] = 255;
c[0] = 0;
c[1] = 0;
c[2] = 0;
int index = color % 3;
c[index] = 255 - color * 11;
const int size = static_cast<int>(m_pixels.size());
for (int i = 0; i < size; ++i)
{
float d = m_depths[i / 4];
if (d > 0 && d < 1)
{
m_pixels[i] = c[i % 4];
}
else
{
m_pixels[i] = 155;
}
}
}
//
// Fills the passed in image with the contents of this image
//
void SubsetTo(Image& image) const
{
image.m_composite_order = m_composite_order;
assert(m_bounds.X.Min >= image.m_bounds.X.Min);
assert(m_bounds.Y.Min >= image.m_bounds.Y.Min);
assert(m_bounds.X.Max <= image.m_bounds.X.Max);
assert(m_bounds.Y.Max <= image.m_bounds.Y.Max);
const int s_dx = m_bounds.X.Max - m_bounds.X.Min + 1;
const int s_dy = m_bounds.Y.Max - m_bounds.Y.Min + 1;
const int dx = image.m_bounds.X.Max - image.m_bounds.X.Min + 1;
//const int dy = image.m_bounds.Y.Max - image.m_bounds.Y.Min + 1;
const int start_x = m_bounds.X.Min - image.m_bounds.X.Min;
const int start_y = m_bounds.Y.Min - image.m_bounds.Y.Min;
#ifdef VTKH_OPENMP_ENABLED
#pragma omp parallel for
#endif
for (int y = 0; y < s_dy; ++y)
{
const int copy_to = (y + start_y) * dx + start_x;
const int copy_from = y * s_dx;
std::copy(&m_pixels[copy_from * 4],
&m_pixels[copy_from * 4] + s_dx * 4,
&image.m_pixels[copy_to * 4]);
std::copy(&m_depths[copy_from], &m_depths[copy_from] + s_dx, &image.m_depths[copy_to]);
}
}
void Swap(Image& other)
{
vtkm::Bounds orig = m_orig_bounds;
vtkm::Bounds bounds = m_bounds;
m_orig_bounds = other.m_orig_bounds;
m_bounds = other.m_bounds;
other.m_orig_bounds = orig;
other.m_bounds = bounds;
m_pixels.swap(other.m_pixels);
m_depths.swap(other.m_depths);
}
void Clear()
{
vtkm::Bounds empty;
m_orig_bounds = empty;
m_bounds = empty;
m_pixels.clear();
m_depths.clear();
}
std::string ToString() const
{
std::stringstream ss;
ss << "Total size pixels " << (int)m_pixels.size() / 4;
ss << " tile dims: {" << m_bounds.X.Min << "," << m_bounds.Y.Min << "} - ";
ss << "{" << m_bounds.X.Max << "," << m_bounds.Y.Max << "}\n";
return ss.str();
}
void Save(const std::string& name, const std::vector<std::string>& comments) const;
void Save(const std::string& name, const std::vector<std::string>& comments);
};
struct CompositeOrderSort
{
inline bool operator()(const Image& lhs, const Image& rhs) const
{
return lhs.m_composite_order < rhs.m_composite_order;
}
};
}
}
} //namespace vtkm::rendering::compositing
#endif //vtk_m_rendering_compositing_Image_h

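The byte arithmetic in CompositeBackground above (and in ImageCompositor::Blend in the next file) is the front-to-back "over" operator on premultiplied-alpha colors, result = front + (1 - alpha_front) * back. A scalar sketch of the same update, assuming premultiplied 8-bit channels:

// one premultiplied 8-bit channel; front_alpha is the front pixel's alpha
unsigned char over(unsigned char front, unsigned char back, unsigned char front_alpha)
{
const unsigned int opacity = 255u - front_alpha; // (1 - a_front) in byte form
return static_cast<unsigned char>(front + opacity * back / 255u);
}
// e.g. over(100, 200, 128) = 100 + 127 * 200 / 255 = 100 + 99 = 199
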
@@ -0,0 +1,222 @@
//============================================================================
// Copyright (c) Kitware, Inc.
// All rights reserved.
// See LICENSE.txt for details.
//
// This software is distributed WITHOUT ANY WARRANTY; without even
// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the above copyright notice for more information.
//============================================================================
#ifndef vtk_m_rendering_compositing_ImageCompositor_h
#define vtk_m_rendering_compositing_ImageCompositor_h
#include <vtkm/rendering/vtkm_rendering_export.h>
#include <algorithm>
#include <cassert>
#include <vtkm/rendering/compositing/Image.h>
namespace vtkm
{
namespace rendering
{
namespace compositing
{
class VTKM_RENDERING_EXPORT ImageCompositor
{
public:
void Blend(vtkm::rendering::compositing::Image& front, vtkm::rendering::compositing::Image& back)
{
assert(front.m_bounds.X.Min == back.m_bounds.X.Min);
assert(front.m_bounds.Y.Min == back.m_bounds.Y.Min);
assert(front.m_bounds.X.Max == back.m_bounds.X.Max);
assert(front.m_bounds.Y.Max == back.m_bounds.Y.Max);
const int size = static_cast<int>(front.m_pixels.size() / 4);
#ifdef VTKH_OPENMP_ENABLED
#pragma omp parallel for
#endif
for (int i = 0; i < size; ++i)
{
const int offset = i * 4;
unsigned int alpha = front.m_pixels[offset + 3];
const unsigned int opacity = 255 - alpha;
front.m_pixels[offset + 0] +=
static_cast<unsigned char>(opacity * back.m_pixels[offset + 0] / 255);
front.m_pixels[offset + 1] +=
static_cast<unsigned char>(opacity * back.m_pixels[offset + 1] / 255);
front.m_pixels[offset + 2] +=
static_cast<unsigned char>(opacity * back.m_pixels[offset + 2] / 255);
front.m_pixels[offset + 3] +=
static_cast<unsigned char>(opacity * back.m_pixels[offset + 3] / 255);
float d1 = std::min(front.m_depths[i], 1.001f);
float d2 = std::min(back.m_depths[i], 1.001f);
float depth = std::min(d1, d2);
front.m_depths[i] = depth;
}
}
void ZBufferComposite(vtkm::rendering::compositing::Image& front,
const vtkm::rendering::compositing::Image& image)
{
assert(front.m_depths.size() == front.m_pixels.size() / 4);
assert(front.m_bounds.X.Min == image.m_bounds.X.Min);
assert(front.m_bounds.Y.Min == image.m_bounds.Y.Min);
assert(front.m_bounds.X.Max == image.m_bounds.X.Max);
assert(front.m_bounds.Y.Max == image.m_bounds.Y.Max);
const int size = static_cast<int>(front.m_depths.size());
#ifdef VTKH_OPENMP_ENABLED
#pragma omp parallel for
#endif
for (int i = 0; i < size; ++i)
{
const float depth = image.m_depths[i];
if (depth > 1.f || front.m_depths[i] < depth)
{
continue;
}
const int offset = i * 4;
front.m_depths[i] = depth < 0.f ? -depth : depth;
front.m_pixels[offset + 0] = image.m_pixels[offset + 0];
front.m_pixels[offset + 1] = image.m_pixels[offset + 1];
front.m_pixels[offset + 2] = image.m_pixels[offset + 2];
front.m_pixels[offset + 3] = image.m_pixels[offset + 3];
}
}
void OrderedComposite(std::vector<vtkm::rendering::compositing::Image>& images)
{
const int total_images = images.size();
std::sort(images.begin(), images.end(), CompositeOrderSort());
for (int i = 1; i < total_images; ++i)
{
Blend(images[0], images[i]);
}
}
void ZBufferComposite(std::vector<vtkm::rendering::compositing::Image>& images)
{
const int total_images = images.size();
for (int i = 1; i < total_images; ++i)
{
ZBufferComposite(images[0], images[i]);
}
}
struct Pixel
{
unsigned char m_color[4];
float m_depth;
int m_pixel_id; // local (sub-image) pixels id
bool operator<(const Pixel& other) const
{
if (m_pixel_id != other.m_pixel_id)
{
return m_pixel_id < other.m_pixel_id;
}
else
{
return m_depth < other.m_depth;
}
}
};
void CombineImages(const std::vector<vtkm::rendering::compositing::Image>& images,
std::vector<Pixel>& pixels)
{
const int num_images = static_cast<int>(images.size());
// allocate a slot for every (image, pixel) pair before scattering into it
pixels.resize(static_cast<size_t>(num_images) * static_cast<size_t>(images[0].GetNumberOfPixels()));
for (int i = 0; i < num_images; ++i)
{
//
// Extract the partial composites into a contiguous array
//
const int image_size = images[i].GetNumberOfPixels();
const int offset = i * image_size;
#ifdef VTKH_OPENMP_ENABLED
#pragma omp parallel for
#endif
for (int j = 0; j < image_size; ++j)
{
const int image_offset = j * 4;
pixels[offset + j].m_color[0] = images[i].m_pixels[image_offset + 0];
pixels[offset + j].m_color[1] = images[i].m_pixels[image_offset + 1];
pixels[offset + j].m_color[2] = images[i].m_pixels[image_offset + 2];
pixels[offset + j].m_color[3] = images[i].m_pixels[image_offset + 3];
pixels[offset + j].m_depth = images[i].m_depths[j];
pixels[offset + j].m_pixel_id = j;
} // for pixels
} // for images
}
void ZBufferBlend(std::vector<vtkm::rendering::compositing::Image>& images)
{
const int image_pixels = images[0].GetNumberOfPixels();
const int num_images = static_cast<int>(images.size());
std::vector<Pixel> pixels;
CombineImages(images, pixels);
// CombineImages groups entries by image; a single sort regroups them by
// pixel id and, within each pixel, front to back by depth
// (see Pixel::operator< above).
std::sort(pixels.begin(), pixels.end());
// check to see if that worked
int pixel_id_0 = pixels[0].m_pixel_id;
for (int i = 1; i < num_images; ++i)
{
assert(pixel_id_0 == pixels[i].m_pixel_id);
}
#ifdef VTKH_OPENMP_ENABLED
#pragma omp parallel for
#endif
for (int i = 0; i < image_pixels; ++i)
{
const int index = i * num_images;
Pixel pixel = pixels[index];
for (int j = 1; j < num_images; ++j)
{
if (pixel.m_color[3] == 255 || pixel.m_depth > 1.f)
{
break;
}
unsigned int alpha = pixel.m_color[3];
const unsigned int opacity = 255 - alpha;
pixel.m_color[0] +=
static_cast<unsigned char>(opacity * pixels[index + j].m_color[0] / 255);
pixel.m_color[1] +=
static_cast<unsigned char>(opacity * pixels[index + j].m_color[1] / 255);
pixel.m_color[2] +=
static_cast<unsigned char>(opacity * pixels[index + j].m_color[2] / 255);
pixel.m_color[3] +=
static_cast<unsigned char>(opacity * pixels[index + j].m_color[3] / 255);
pixel.m_depth = pixels[index + j].m_depth;
} // for each image
images[0].m_pixels[i * 4 + 0] = pixel.m_color[0];
images[0].m_pixels[i * 4 + 1] = pixel.m_color[1];
images[0].m_pixels[i * 4 + 2] = pixel.m_color[2];
images[0].m_pixels[i * 4 + 3] = pixel.m_color[3];
images[0].m_depths[i] = pixel.m_depth;
} // for each pixel
}
};
}
}
} //namespace vtkm::rendering::compositing
#endif //vtk_m_rendering_compositing_ImageCompositor_h

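As a concrete check of the ZBufferComposite depth rule (a sample replaces the front pixel only when its depth is valid, i.e. <= 1, and closer), a single-pixel sketch; all names here are invented for illustration:

vtkm::rendering::compositing::Image front, candidate;
unsigned char blue[4] = { 0, 0, 255, 255 };
unsigned char red[4] = { 255, 0, 0, 255 };
float farDepth = 0.75f, nearDepth = 0.25f;
front.Init(blue, &farDepth, 1, 1);
candidate.Init(red, &nearDepth, 1, 1);
vtkm::rendering::compositing::ImageCompositor compositor;
compositor.ZBufferComposite(front, candidate);
// front is now red with depth 0.25; a depth > 1.f would have been ignored
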
@@ -0,0 +1,246 @@
//============================================================================
// Copyright (c) Kitware, Inc.
// All rights reserved.
// See LICENSE.txt for details.
//
// This software is distributed WITHOUT ANY WARRANTY; without even
// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the above copyright notice for more information.
//============================================================================
#include <vtkm/internal/Configure.h>
#include <vtkm/rendering/compositing/PNGEncoder.h>
#include <iostream>
#include <cstdlib>
#include <cstring>
VTKM_THIRDPARTY_PRE_INCLUDE
#include <vtkm/thirdparty/lodepng/vtkmlodepng/lodepng.h>
VTKM_THIRDPARTY_POST_INCLUDE
namespace vtkm
{
namespace rendering
{
namespace compositing
{
PNGEncoder::PNGEncoder()
: m_buffer(NULL)
, m_buffer_size(0)
{
}
PNGEncoder::~PNGEncoder()
{
Cleanup();
}
void PNGEncoder::Encode(const unsigned char* rgba_in, const int width, const int height)
{
Cleanup();
// upside down relative to what lodepng wants
unsigned char* rgba_flip = new unsigned char[width * height * 4];
for (int y = 0; y < height; ++y)
{
memcpy(&(rgba_flip[y * width * 4]), &(rgba_in[(height - y - 1) * width * 4]), width * 4);
}
vtkm::png::LodePNGState state;
vtkm::png::lodepng_state_init(&state);
// use less aggressive compression
state.encoder.zlibsettings.btype = 2;
state.encoder.zlibsettings.use_lz77 = 0;
unsigned error =
vtkm::png::lodepng_encode(&m_buffer, &m_buffer_size, &rgba_flip[0], width, height, &state);
delete[] rgba_flip;
if (error)
{
std::cerr << "lodepng_encode failed\n";
}
}
void PNGEncoder::Encode(const float* rgba_in, const int width, const int height)
{
Cleanup();
// upside down relative to what lodepng wants
unsigned char* rgba_flip = new unsigned char[width * height * 4];
// flip the image vertically and convert floats to bytes; parallelize over rows
#ifdef VTKH_OPENMP_ENABLED
#pragma omp parallel for
#endif
for (int y = 0; y < height; ++y)
{
for (int x = 0; x < width; ++x)
{
const int inOffset = (y * width + x) * 4;
const int outOffset = ((height - y - 1) * width + x) * 4;
rgba_flip[outOffset + 0] = (unsigned char)(rgba_in[inOffset + 0] * 255.f);
rgba_flip[outOffset + 1] = (unsigned char)(rgba_in[inOffset + 1] * 255.f);
rgba_flip[outOffset + 2] = (unsigned char)(rgba_in[inOffset + 2] * 255.f);
rgba_flip[outOffset + 3] = (unsigned char)(rgba_in[inOffset + 3] * 255.f);
}
}
vtkm::png::LodePNGState state;
vtkm::png::lodepng_state_init(&state);
// use less aggressive compression
state.encoder.zlibsettings.btype = 2;
state.encoder.zlibsettings.use_lz77 = 0;
unsigned error =
vtkm::png::lodepng_encode(&m_buffer, &m_buffer_size, &rgba_flip[0], width, height, &state);
delete[] rgba_flip;
if (error)
{
std::cerr << "lodepng_encode failed\n";
}
}
void PNGEncoder::Encode(const unsigned char* rgba_in,
const int width,
const int height,
const std::vector<std::string>& comments)
{
Cleanup();
// upside down relative to what lodepng wants
unsigned char* rgba_flip = new unsigned char[width * height * 4];
for (int y = 0; y < height; ++y)
{
memcpy(&(rgba_flip[y * width * 4]), &(rgba_in[(height - y - 1) * width * 4]), width * 4);
}
vtkm::png::LodePNGState state;
vtkm::png::lodepng_state_init(&state);
// use less aggressive compression
state.encoder.zlibsettings.btype = 2;
state.encoder.zlibsettings.use_lz77 = 0;
if (comments.size() % 2 != 0)
{
std::cerr << "PNGEncoder::Encode comments missing value for the last key.\n";
std::cerr << "Ignoring the last key.\n";
}
if (comments.size() > 1)
{
vtkm::png::lodepng_info_init(&state.info_png);
// Comments are in pairs with a key and a value, using
// comments.size()-1 ensures that we don't use the last
// comment if the length of the vector isn't a multiple of 2.
for (size_t i = 0; i + 1 < comments.size(); i += 2)
vtkm::png::lodepng_add_text(&state.info_png, comments[i].c_str(), comments[i + 1].c_str());
}
unsigned error =
vtkm::png::lodepng_encode(&m_buffer, &m_buffer_size, &rgba_flip[0], width, height, &state);
delete[] rgba_flip;
if (error)
{
std::cerr << "lodepng_encode_memory failed\n";
}
}
void PNGEncoder::Encode(const float* rgba_in,
const int width,
const int height,
const std::vector<std::string>& comments)
{
Cleanup();
// upside down relative to what lodepng wants
unsigned char* rgba_flip = new unsigned char[width * height * 4];
// flip the image vertically and convert floats to bytes; parallelize over rows
#ifdef VTKH_OPENMP_ENABLED
#pragma omp parallel for
#endif
for (int y = 0; y < height; ++y)
{
for (int x = 0; x < width; ++x)
{
const int inOffset = (y * width + x) * 4;
const int outOffset = ((height - y - 1) * width + x) * 4;
rgba_flip[outOffset + 0] = (unsigned char)(rgba_in[inOffset + 0] * 255.f);
rgba_flip[outOffset + 1] = (unsigned char)(rgba_in[inOffset + 1] * 255.f);
rgba_flip[outOffset + 2] = (unsigned char)(rgba_in[inOffset + 2] * 255.f);
rgba_flip[outOffset + 3] = (unsigned char)(rgba_in[inOffset + 3] * 255.f);
}
}
vtkm::png::LodePNGState state;
vtkm::png::lodepng_state_init(&state);
// use less aggressive compression
state.encoder.zlibsettings.btype = 2;
state.encoder.zlibsettings.use_lz77 = 0;
if (comments.size() % 2 != 0)
{
std::cerr << "PNGEncoder::Encode comments missing value for the last key.\n";
std::cerr << "Ignoring the last key.\n";
}
if (comments.size() > 1)
{
vtkm::png::lodepng_info_init(&state.info_png);
// Comments are in pairs with a key and a value, using
// comments.size()-1 ensures that we don't use the last
// comment if the length of the vector isn't a multiple of 2.
for (size_t i = 0; i + 1 < comments.size(); i += 2)
vtkm::png::lodepng_add_text(&state.info_png, comments[i].c_str(), comments[i + 1].c_str());
}
unsigned error =
vtkm::png::lodepng_encode(&m_buffer, &m_buffer_size, &rgba_flip[0], width, height, &state);
delete[] rgba_flip;
if (error)
{
std::cerr << "lodepng_encode_memory failed\n";
}
}
void PNGEncoder::Save(const std::string& filename)
{
if (m_buffer == NULL)
{
std::cerr << "Save must be called after encode()\n";
/// we have a problem ...!
return;
}
unsigned error = vtkm::png::lodepng_save_file(m_buffer, m_buffer_size, filename.c_str());
if (error)
{
std::cerr << "Error saving PNG buffer to file: " << filename << "\n";
}
}
void* PNGEncoder::PngBuffer()
{
return (void*)m_buffer;
}
size_t PNGEncoder::PngBufferSize()
{
return m_buffer_size;
}
void PNGEncoder::Cleanup()
{
if (m_buffer != NULL)
{
//lodepng_free(m_buffer);
// ^-- Not found even if LODEPNG_COMPILE_ALLOCATORS is defined?
// simply use "free"
free(m_buffer);
m_buffer = NULL;
m_buffer_size = 0;
}
}
}
}
} //namespace vtkm::rendering::compositing

@@ -0,0 +1,57 @@
//============================================================================
// Copyright (c) Kitware, Inc.
// All rights reserved.
// See LICENSE.txt for details.
//
// This software is distributed WITHOUT ANY WARRANTY; without even
// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the above copyright notice for more information.
//============================================================================
#ifndef vtk_m_rendering_compositing_PNGEncoder_h
#define vtk_m_rendering_compositing_PNGEncoder_h
#include <string>
#include <vector>
namespace vtkm
{
namespace rendering
{
namespace compositing
{
class PNGEncoder
{
public:
PNGEncoder();
~PNGEncoder();
void Encode(const unsigned char* rgba_in, const int width, const int height);
void Encode(const float* rgba_in, const int width, const int height);
void Encode(const unsigned char* rgba_in,
const int width,
const int height,
const std::vector<std::string>& comments);
void Encode(const float* rgba_in,
const int width,
const int height,
const std::vector<std::string>& comments);
void Save(const std::string& filename);
void* PngBuffer();
size_t PngBufferSize();
void Cleanup();
private:
unsigned char* m_buffer;
size_t m_buffer_size;
};
}
}
} //namespace vtkm::rendering::compositing
#endif //vtk_m_rendering_compositing_PNGEncoder_h

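A minimal round trip with the encoder (sketch only; rgba is assumed to be a width*height RGBA byte buffer):

vtkm::rendering::compositing::PNGEncoder encoder;
// comments travel as key/value pairs
std::vector<std::string> comments = { "Software", "vtkm" };
encoder.Encode(rgba, width, height, comments);
encoder.Save("out.png");
// the encoded buffer stays owned by the encoder until Cleanup() or the next Encode()
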
@@ -0,0 +1,62 @@
#include <vtkm/rendering/compositing/PayloadCompositor.h>
#include <vtkm/rendering/compositing/PayloadImageCompositor.h>
#include <algorithm>
#include <assert.h>
#ifdef VTKH_PARALLEL
#include <diy/mpi.hpp>
#include <mpi.h>
#include <vtkh/compositing/RadixKCompositor.hpp>
#include <vtkh/vtkh.hpp>
#endif
using namespace vtkm::rendering::compositing;
namespace vtkh
{
PayloadCompositor::PayloadCompositor() {}
void PayloadCompositor::ClearImages()
{
m_images.clear();
}
void PayloadCompositor::AddImage(PayloadImage& image)
{
assert(image.GetNumberOfPixels() != 0);
if (m_images.size() == 0)
{
m_images.push_back(image);
}
else
{
//
// Do local composite and keep a single image
//
PayloadImageCompositor compositor;
compositor.ZBufferComposite(m_images[0], image);
}
}
PayloadImage PayloadCompositor::Composite()
{
assert(m_images.size() != 0);
// nothing to do here in serial. Images were composited as
// they were added to the compositor
#ifdef VTKH_PARALLEL
vtkhdiy::mpi::communicator diy_comm;
diy_comm = vtkhdiy::mpi::communicator(MPI_Comm_f2c(GetMPICommHandle()));
assert(m_images.size() == 1);
RadixKCompositor compositor;
compositor.CompositeSurface(diy_comm, this->m_images[0]);
#endif
// Make this a param to avoid the copy?
return m_images[0];
}
} // namespace vtkh

@@ -0,0 +1,28 @@
#ifndef VTKH_PAYLOAD_COMPOSITOR_HPP
#define VTKH_PAYLOAD_COMPOSITOR_HPP
#include <vtkm/rendering/vtkm_rendering_export.h>
#include <vtkm/rendering/compositing/PayloadImage.h>
namespace vtkh
{
class VTKM_RENDERING_EXPORT PayloadCompositor
{
public:
PayloadCompositor();
void ClearImages();
void AddImage(vtkm::rendering::compositing::PayloadImage& image);
vtkm::rendering::compositing::PayloadImage Composite();
protected:
std::vector<vtkm::rendering::compositing::PayloadImage> m_images;
};
} // namespace vtkh
#endif

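PayloadImage (next file) generalizes Image: instead of RGBA it carries m_payload_bytes of opaque data per pixel, still keyed by a depth buffer, so non-color quantities can be composited the same way. A sketch assuming one vtkm::Float32 scalar per pixel; the scalars and depths buffers are hypothetical local data:

const int payloadBytes = sizeof(vtkm::Float32);
vtkm::Bounds bounds(1, width, 1, height, 0, 0);
vtkm::rendering::compositing::PayloadImage image(bounds, payloadBytes);
image.Init(reinterpret_cast<const unsigned char*>(scalars), depths, width, height);
vtkh::PayloadCompositor compositor;
compositor.AddImage(image);
vtkm::rendering::compositing::PayloadImage result = compositor.Composite();
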
@@ -0,0 +1,33 @@
//============================================================================
// Copyright (c) Kitware, Inc.
// All rights reserved.
// See LICENSE.txt for details.
//
// This software is distributed WITHOUT ANY WARRANTY; without even
// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the above copyright notice for more information.
//============================================================================
#include <vtkm/rendering/compositing/PNGEncoder.h>
#include <vtkm/rendering/compositing/PayloadImage.h>
namespace vtkm
{
namespace rendering
{
namespace compositing
{
void PayloadImage::Save(const std::string& name, const std::vector<std::string>& comments)
{
PNGEncoder encoder;
encoder.Encode(&m_payloads[0],
m_bounds.X.Max - m_bounds.X.Min + 1,
m_bounds.Y.Max - m_bounds.Y.Min + 1,
comments);
encoder.Save(name);
}
}
}
} //namespace vtkm::rendering::compositing

@@ -0,0 +1,211 @@
//============================================================================
// Copyright (c) Kitware, Inc.
// All rights reserved.
// See LICENSE.txt for details.
//
// This software is distributed WITHOUT ANY WARRANTY; without even
// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the above copyright notice for more information.
//============================================================================
#ifndef vtk_m_rendering_compositing_PayloadImage_h
#define vtk_m_rendering_compositing_PayloadImage_h
#include <vtkm/rendering/vtkm_rendering_export.h>
#include <algorithm>
#include <cassert>
#include <sstream>
#include <vector>
#include <vtkm/Bounds.h>
namespace vtkm
{
namespace rendering
{
namespace compositing
{
struct VTKM_RENDERING_EXPORT PayloadImage
{
// Image bounds are expressed on a 1-based grid: X spans 1..width and
// Y spans 1..height, so the actual width is
// m_bounds.X.Max - m_bounds.X.Min + 1
// (e.g., 1024 - 1 + 1 = 1024).
vtkm::Bounds m_orig_bounds;
vtkm::Bounds m_bounds;
std::vector<unsigned char> m_payloads;
std::vector<float> m_depths;
int m_orig_rank;
int m_payload_bytes; // Size of the payload in bytes
float m_default_value;
PayloadImage() {}
PayloadImage(const vtkm::Bounds& bounds, const int payload_bytes)
: m_orig_bounds(bounds)
, m_bounds(bounds)
, m_orig_rank(-1)
, m_payload_bytes(payload_bytes)
{
m_default_value = vtkm::Nan32();
const int dx = bounds.X.Max - bounds.X.Min + 1;
const int dy = bounds.Y.Max - bounds.Y.Min + 1;
m_payloads.resize(dx * dy * m_payload_bytes);
m_depths.resize(dx * dy);
}
void InitOriginal(const PayloadImage& other)
{
m_orig_bounds = other.m_orig_bounds;
m_bounds = other.m_orig_bounds;
m_payload_bytes = other.m_payload_bytes;
m_default_value = other.m_default_value;
const int dx = m_bounds.X.Max - m_bounds.X.Min + 1;
const int dy = m_bounds.Y.Max - m_bounds.Y.Min + 1;
m_payloads.resize(dx * dy * m_payload_bytes);
m_depths.resize(dx * dy);
m_orig_rank = -1;
}
int GetNumberOfPixels() const { return static_cast<int>(m_depths.size()); }
void Init(const unsigned char* payload_buffer, const float* depth_buffer, int width, int height)
{
m_bounds.X.Min = 1;
m_bounds.Y.Min = 1;
m_bounds.X.Max = width;
m_bounds.Y.Max = height;
m_orig_bounds = m_bounds;
const int size = width * height;
m_payloads.resize(size * m_payload_bytes);
m_depths.resize(size);
std::copy(payload_buffer, payload_buffer + size * m_payload_bytes, &m_payloads[0]);
std::copy(depth_buffer, depth_buffer + size, &m_depths[0]);
}
//
// Fill this image with a sub-region of another image
//
void SubsetFrom(const PayloadImage& image, const vtkm::Bounds& sub_region)
{
m_orig_bounds = image.m_orig_bounds;
m_bounds = sub_region;
m_orig_rank = image.m_orig_rank;
m_payload_bytes = image.m_payload_bytes;
assert(sub_region.X.Min >= image.m_bounds.X.Min);
assert(sub_region.Y.Min >= image.m_bounds.Y.Min);
assert(sub_region.X.Max <= image.m_bounds.X.Max);
assert(sub_region.Y.Max <= image.m_bounds.Y.Max);
const int s_dx = m_bounds.X.Max - m_bounds.X.Min + 1;
const int s_dy = m_bounds.Y.Max - m_bounds.Y.Min + 1;
const int dx = image.m_bounds.X.Max - image.m_bounds.X.Min + 1;
//const int dy = image.m_bounds.Y.Max - image.m_bounds.Y.Min + 1;
const int start_x = m_bounds.X.Min - image.m_bounds.X.Min;
const int start_y = m_bounds.Y.Min - image.m_bounds.Y.Min;
const int end_y = start_y + s_dy;
size_t buffer_size = s_dx * s_dy * m_payload_bytes;
m_payloads.resize(buffer_size);
m_depths.resize(s_dx * s_dy);
#ifdef VTKH_OPENMP_ENABLED
#pragma omp parallel for
#endif
for (int y = start_y; y < end_y; ++y)
{
const int copy_to = (y - start_y) * s_dx;
const int copy_from = y * dx + start_x;
std::copy(&image.m_payloads[copy_from * m_payload_bytes],
&image.m_payloads[copy_from * m_payload_bytes] + s_dx * m_payload_bytes,
&m_payloads[copy_to * m_payload_bytes]);
std::copy(&image.m_depths[copy_from], &image.m_depths[copy_from] + s_dx, &m_depths[copy_to]);
}
}
//
// Fills the passed in image with the contents of this image
//
void SubsetTo(PayloadImage& image) const
{
assert(m_bounds.X.Min >= image.m_bounds.X.Min);
assert(m_bounds.Y.Min >= image.m_bounds.Y.Min);
assert(m_bounds.X.Max <= image.m_bounds.X.Max);
assert(m_bounds.Y.Max <= image.m_bounds.Y.Max);
const int s_dx = m_bounds.X.Max - m_bounds.X.Min + 1;
const int s_dy = m_bounds.Y.Max - m_bounds.Y.Min + 1;
const int dx = image.m_bounds.X.Max - image.m_bounds.X.Min + 1;
//const int dy = image.m_bounds.Y.Max - image.m_bounds.Y.Min + 1;
const int start_x = m_bounds.X.Min - image.m_bounds.X.Min;
const int start_y = m_bounds.Y.Min - image.m_bounds.Y.Min;
#ifdef VTKH_OPENMP_ENABLED
#pragma omp parallel for
#endif
for (int y = 0; y < s_dy; ++y)
{
const int copy_to = (y + start_y) * dx + start_x;
const int copy_from = y * s_dx;
std::copy(&m_payloads[copy_from * m_payload_bytes],
&m_payloads[copy_from * m_payload_bytes] + s_dx * m_payload_bytes,
&image.m_payloads[copy_to * m_payload_bytes]);
std::copy(&m_depths[copy_from], &m_depths[copy_from] + s_dx, &image.m_depths[copy_to]);
}
}
void Swap(PayloadImage& other)
{
vtkm::Bounds orig = m_orig_bounds;
vtkm::Bounds bounds = m_bounds;
m_orig_bounds = other.m_orig_bounds;
m_bounds = other.m_bounds;
other.m_orig_bounds = orig;
other.m_bounds = bounds;
m_payloads.swap(other.m_payloads);
m_depths.swap(other.m_depths);
}
void Clear()
{
vtkm::Bounds empty;
m_orig_bounds = empty;
m_bounds = empty;
m_payloads.clear();
m_depths.clear();
}
std::string ToString() const
{
std::stringstream ss;
ss << "Total size pixels " << (int)m_depths.size();
ss << " tile dims: {" << m_bounds.X.Min << "," << m_bounds.Y.Min << "} - ";
ss << "{" << m_bounds.X.Max << "," << m_bounds.Y.Max << "}\n";
return ss.str();
}
void Save(const std::string& name, const std::vector<std::string>& comments);
};
}
}
} //namespace vtkm::rendering::compositing
#endif //vtk_m_rendering_compositing_PayloadImage_h

@@ -0,0 +1,74 @@
//============================================================================
// Copyright (c) Kitware, Inc.
// All rights reserved.
// See LICENSE.txt for details.
//
// This software is distributed WITHOUT ANY WARRANTY; without even
// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the above copyright notice for more information.
//============================================================================
#ifndef vtk_m_rendering_compositing_PayloadImageCompositor_h
#define vtk_m_rendering_compositing_PayloadImageCompositor_h
#include <vtkm/rendering/vtkm_rendering_export.h>
#include <algorithm>
#include <cassert>
#include <cmath>
#include <iostream>
#include <vtkm/rendering/compositing/PayloadImage.h>
namespace vtkm
{
namespace rendering
{
namespace compositing
{
class VTKM_RENDERING_EXPORT PayloadImageCompositor
{
public:
void ZBufferComposite(vtkm::rendering::compositing::PayloadImage& front,
const vtkm::rendering::compositing::PayloadImage& image)
{
if (front.m_payload_bytes != image.m_payload_bytes)
{
std::cerr << "PayloadImageCompositor::ZBufferComposite: payload size mismatch, skipping\n";
return;
}
assert(front.m_depths.size() == front.m_payloads.size() / front.m_payload_bytes);
assert(front.m_bounds.X.Min == image.m_bounds.X.Min);
assert(front.m_bounds.Y.Min == image.m_bounds.Y.Min);
assert(front.m_bounds.X.Max == image.m_bounds.X.Max);
assert(front.m_bounds.Y.Max == image.m_bounds.Y.Max);
const int size = static_cast<int>(front.m_depths.size());
#ifdef VTKH_OPENMP_ENABLED
#pragma omp parallel for
#endif
for (int i = 0; i < size; ++i)
{
const float depth = image.m_depths[i];
const float fdepth = front.m_depths[i];
// this should handle NaNs correctly
const bool take_back = fmin(depth, fdepth) == depth;
if (take_back)
{
front.m_depths[i] = depth;
const size_t p_offset = i * front.m_payload_bytes;
std::copy(&image.m_payloads[p_offset],
&image.m_payloads[p_offset] + front.m_payload_bytes,
&front.m_payloads[p_offset]);
}
}
}
};
}
}
} //namespace vtkm::rendering::compositing
#endif //vtk_m_rendering_compositing_PayloadImageCompositor_h

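The fmin() in ZBufferComposite above leans on IEEE NaN semantics: fmin(NaN, x) returns x, so a pixel still holding a NaN default depth always loses to a real sample, while for two real depths the nearer one wins. A standalone illustration:

#include <cmath>
// fmin ignores a NaN argument:
// fmin(0.5f, NAN) == 0.5f, so take_back = (fmin(depth, fdepth) == depth) is true
// and the incoming payload at depth 0.5 replaces the empty front pixel
const bool take_back = std::fmin(0.5f, NAN) == 0.5f; // true
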
@@ -0,0 +1,208 @@
//============================================================================
// Copyright (c) Kitware, Inc.
// All rights reserved.
// See LICENSE.txt for details.
//
// This software is distributed WITHOUT ANY WARRANTY; without even
// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the above copyright notice for more information.
//============================================================================
#include <vtkm/rendering/compositing/Image.h>
#include <vtkm/rendering/compositing/ImageCompositor.h>
#include <vtkm/rendering/compositing/PayloadImage.h>
#include <vtkm/rendering/compositing/PayloadImageCompositor.h>
//#include <vtkm/compositing/MPICollect.hpp>
#include <vtkm/rendering/compositing/RadixKCompositor.h>
#include <vtkm/rendering/compositing/vtkm_diy_collect.h>
#include <vtkm/rendering/compositing/vtkm_diy_utils.h>
/*
#include <diy/master.hpp>
#include <diy/mpi.hpp>
#include <diy/partners/swap.hpp>
#include <diy/reduce-operations.hpp>
#include <diy/reduce.hpp>
*/
namespace vtkm
{
namespace rendering
{
namespace compositing
{
template <typename ImageType>
void DepthComposite(ImageType& front, ImageType& back);
template <>
void DepthComposite<PayloadImage>(PayloadImage& front, PayloadImage& back)
{
vtkm::rendering::compositing::PayloadImageCompositor compositor;
compositor.ZBufferComposite(front, back);
}
template <>
void DepthComposite<vtkm::rendering::compositing::Image>(vtkm::rendering::compositing::Image& front,
vtkm::rendering::compositing::Image& back)
{
vtkm::rendering::compositing::ImageCompositor compositor;
compositor.ZBufferComposite(front, back);
}
template <typename ImageType>
void reduce_images(void* b,
const vtkmdiy::ReduceProxy& proxy,
const vtkmdiy::RegularSwapPartners& partners)
{
ImageBlock<ImageType>* block = reinterpret_cast<ImageBlock<ImageType>*>(b);
unsigned int round = proxy.round();
ImageType& image = block->m_image;
// count the number of incoming pixels
if (proxy.in_link().size() > 0)
{
for (int i = 0; i < proxy.in_link().size(); ++i)
{
int gid = proxy.in_link().target(i).gid;
if (gid == proxy.gid())
{
// skip receiving from self since we sent nothing
continue;
}
ImageType incoming;
proxy.dequeue(gid, incoming);
DepthComposite(image, incoming);
} // for in links
}
if (proxy.out_link().size() == 0)
{
return;
}
// split the current image into group_size pieces for this swap round
const int group_size = proxy.out_link().size();
const int current_dim = partners.dim(round);
//create balanced set of ranges for current dim
vtkmdiy::DiscreteBounds image_bounds = vtkh::VTKMBoundsToDIY(image.m_bounds);
int range_length = image_bounds.max[current_dim] - image_bounds.min[current_dim];
int base_step = range_length / group_size;
int rem = range_length % group_size;
std::vector<int> bucket_sizes(group_size, base_step);
for (int i = 0; i < rem; ++i)
{
bucket_sizes[i]++;
}
int count = 0;
for (int i = 0; i < group_size; ++i)
{
count += bucket_sizes[i];
}
assert(count == range_length);
std::vector<vtkmdiy::DiscreteBounds> subset_bounds(group_size,
vtkh::VTKMBoundsToDIY(image.m_bounds));
int min_pixel = image_bounds.min[current_dim];
for (int i = 0; i < group_size; ++i)
{
subset_bounds[i].min[current_dim] = min_pixel;
subset_bounds[i].max[current_dim] = min_pixel + bucket_sizes[i];
min_pixel += bucket_sizes[i];
}
// debug: verify the pieces exactly tile the current dimension
if (group_size > 1)
{
for (int i = 1; i < group_size; ++i)
{
assert(subset_bounds[i - 1].max[current_dim] == subset_bounds[i].min[current_dim]);
}
assert(subset_bounds[0].min[current_dim] == image_bounds.min[current_dim]);
assert(subset_bounds[group_size - 1].max[current_dim] == image_bounds.max[current_dim]);
}
std::vector<ImageType> out_images(group_size);
for (int i = 0; i < group_size; ++i)
{
out_images[i].SubsetFrom(image, vtkh::DIYBoundsToVTKM(subset_bounds[i]));
} //for
for (int i = 0; i < group_size; ++i)
{
if (proxy.out_link().target(i).gid == proxy.gid())
{
image.Swap(out_images[i]);
}
else
{
proxy.enqueue(proxy.out_link().target(i), out_images[i]);
}
} //for
} // reduce images
RadixKCompositor::RadixKCompositor() {}
RadixKCompositor::~RadixKCompositor() {}
template <typename ImageType>
void RadixKCompositor::CompositeImpl(vtkmdiy::mpi::communicator& diy_comm, ImageType& image)
{
vtkmdiy::DiscreteBounds global_bounds = vtkh::VTKMBoundsToDIY(image.m_orig_bounds);
// tells diy to use one thread
const int num_threads = 1;
const int num_blocks = diy_comm.size();
const int magic_k = 8;
vtkmdiy::Master master(diy_comm, num_threads, -1, 0, [](void* b) {
ImageBlock<ImageType>* block = reinterpret_cast<ImageBlock<ImageType>*>(b);
delete block;
});
// create an assigner with one block per rank
vtkmdiy::ContiguousAssigner assigner(num_blocks, num_blocks);
vtkm::rendering::compositing::AddImageBlock<ImageType> create(master, image);
const int num_dims = 2;
vtkmdiy::RegularDecomposer<vtkmdiy::DiscreteBounds> decomposer(
num_dims, global_bounds, num_blocks);
decomposer.decompose(diy_comm.rank(), assigner, create);
vtkmdiy::RegularSwapPartners partners(decomposer, magic_k,
false); // false == distance halving
vtkmdiy::reduce(master, assigner, partners, reduce_images<ImageType>);
//MPICollect(image, diy_comm);
vtkmdiy::all_to_all(
master, assigner, vtkm::rendering::compositing::CollectImages<ImageType>(decomposer), magic_k);
if (diy_comm.rank() == 0)
{
master.prof.output(m_timing_log);
}
}
void RadixKCompositor::CompositeSurface(vtkmdiy::mpi::communicator& diy_comm,
vtkm::rendering::compositing::Image& image)
{
CompositeImpl(diy_comm, image);
}
void RadixKCompositor::CompositeSurface(vtkmdiy::mpi::communicator& diy_comm,
vtkm::rendering::compositing::PayloadImage& image)
{
CompositeImpl(diy_comm, image);
}
std::string RadixKCompositor::GetTimingString()
{
std::string res(m_timing_log.str());
m_timing_log.str("");
return res;
}
}
}
} //namespace vtkm::rendering::compositing

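For scale intuition on the radix-k exchange above: RegularSwapPartners with target k splits the ranks into groups of (at most) k each round, so the number of swap rounds grows as log_k(nblocks); with 64 ranks and the magic_k of 8 used here that is 2 rounds, and in each round a rank keeps 1/k of its current image extent and sends the rest to its group. A sketch of the round count, assuming nblocks is a power of k:

#include <cmath>
// swap rounds needed for nblocks ranks with radix k
int radix_k_rounds(int nblocks, int k)
{
return static_cast<int>(std::ceil(std::log(nblocks) / std::log(k)));
}
// radix_k_rounds(8, 8) == 1; radix_k_rounds(64, 8) == 2
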
@@ -0,0 +1,54 @@
//============================================================================
// Copyright (c) Kitware, Inc.
// All rights reserved.
// See LICENSE.txt for details.
//
// This software is distributed WITHOUT ANY WARRANTY; without even
// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the above copyright notice for more information.
//============================================================================
#ifndef vtk_m_rendering_compositing_RadixKCompositor_h
#define vtk_m_rendering_compositing_RadixKCompositor_h
#include <vtkm/rendering/vtkm_rendering_export.h>
#include <vtkm/rendering/compositing/Image.h>
#include <vtkm/rendering/compositing/PayloadImage.h>
#include <vtkm/thirdparty/diy/diy.h>
#ifdef VTKM_ENABLE_MPI
//#include <mpi.h>
//#include <vtkm/thirdparty/diy/mpi.h>
//#include <vtkm/thirdparty/diy/mpi-cast.h>
#endif
namespace vtkm
{
namespace rendering
{
namespace compositing
{
class VTKM_RENDERING_EXPORT RadixKCompositor
{
public:
RadixKCompositor();
~RadixKCompositor();
void CompositeSurface(vtkmdiy::mpi::communicator& diy_comm, Image& image);
void CompositeSurface(vtkmdiy::mpi::communicator& diy_comm, PayloadImage& image);
template <typename ImageType>
void CompositeImpl(vtkmdiy::mpi::communicator& diy_comm, ImageType& image);
std::string GetTimingString();
private:
std::stringstream m_timing_log;
};
}
}
} //namespace vtkm::rendering::compositing
#endif //vtk_m_rendering_compositing_RadixKCompositor_h

@@ -0,0 +1,88 @@
//============================================================================
// Copyright (c) Kitware, Inc.
// All rights reserved.
// See LICENSE.txt for details.
//
// This software is distributed WITHOUT ANY WARRANTY; without even
// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the above copyright notice for more information.
//============================================================================
#ifndef vtk_m_rendering_compositing_vtkm_diy_collect_h
#define vtk_m_rendering_compositing_vtkm_diy_collect_h
#include <vtkm/rendering/vtkm_rendering_export.h>
#include <vtkm/rendering/compositing/Image.h>
#include <vtkm/rendering/compositing/vtkm_diy_image_block.h>
#include <vtkmdiy/master.hpp>
#include <vtkmdiy/partners/swap.hpp>
#include <vtkmdiy/reduce-operations.hpp>
#include <vtkmdiy/reduce.hpp>
namespace vtkm
{
namespace rendering
{
namespace compositing
{
template <typename ImageType>
struct CollectImages
{
const vtkmdiy::RegularDecomposer<vtkmdiy::DiscreteBounds>& m_decomposer;
CollectImages(const vtkmdiy::RegularDecomposer<vtkmdiy::DiscreteBounds>& decomposer)
: m_decomposer(decomposer)
{
}
void operator()(void* b, const vtkmdiy::ReduceProxy& proxy) const
{
ImageBlock<ImageType>* block = reinterpret_cast<ImageBlock<ImageType>*>(b);
//
// On the first round there is nothing incoming. Take the images we have
// and send them to the collection rank.
//
const int collection_rank = 0;
if (proxy.in_link().size() == 0)
{
if (proxy.gid() != collection_rank)
{
int dest_gid = collection_rank;
vtkmdiy::BlockID dest = proxy.out_link().target(dest_gid);
proxy.enqueue(dest, block->m_image);
block->m_image.Clear();
}
} // if
else if (proxy.gid() == collection_rank)
{
ImageType final_image;
final_image.InitOriginal(block->m_image);
block->m_image.SubsetTo(final_image);
for (int i = 0; i < proxy.in_link().size(); ++i)
{
int gid = proxy.in_link().target(i).gid;
if (gid == collection_rank)
{
continue;
}
ImageType incoming;
proxy.dequeue(gid, incoming);
incoming.SubsetTo(final_image);
} // for
block->m_image.Swap(final_image);
} // else
} // operator
};
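// Typical use pairs CollectImages with vtkmdiy::all_to_all (sketch; the
// master, assigner, and decomposer setup are omitted):
//
//   vtkmdiy::all_to_all(master, assigner,
//                       CollectImages<Image>(decomposer), /*k=*/2);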
}
}
} //namespace vtkm::rendering::compositing
#endif //vtk_m_rendering_compositing_vtkm_diy_collect_h

@ -0,0 +1,194 @@
#ifndef VTKH_DIY_IMAGE_BLOCK_HPP
#define VTKH_DIY_IMAGE_BLOCK_HPP
#include <vtkm/rendering/compositing/Image.h>
#include <vtkm/rendering/compositing/PayloadImage.h>
#include <vtkmdiy/master.hpp>
namespace vtkm
{
namespace rendering
{
namespace compositing
{
template <typename ImageType>
struct ImageBlock
{
ImageType& m_image;
ImageBlock(ImageType& image)
: m_image(image)
{
}
};
struct MultiImageBlock
{
std::vector<vtkm::rendering::compositing::Image>& m_images;
vtkm::rendering::compositing::Image& m_output;
MultiImageBlock(std::vector<vtkm::rendering::compositing::Image>& images,
vtkm::rendering::compositing::Image& output)
: m_images(images)
, m_output(output)
{
}
};
template <typename ImageType>
struct AddImageBlock
{
ImageType& m_image;
const vtkmdiy::Master& m_master;
AddImageBlock(vtkmdiy::Master& master, ImageType& image)
: m_image(image)
, m_master(master)
{
}
template <typename BoundsType, typename LinkType>
void operator()(int gid,
const BoundsType&, // local_bounds
const BoundsType&, // local_with_ghost_bounds
const BoundsType&, // domain_bounds
const LinkType& link) const
{
ImageBlock<ImageType>* block = new ImageBlock<ImageType>(m_image);
LinkType* linked = new LinkType(link);
vtkmdiy::Master& master = const_cast<vtkmdiy::Master&>(m_master);
master.add(gid, block, linked);
}
};
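// AddImageBlock is the callback handed to RegularDecomposer::decompose; DIY
// invokes operator() once per locally assigned gid so the rank's image gets
// registered with the master (sketch; decomposer construction omitted):
//
//   AddImageBlock<Image> create(master, image);
//   decomposer.decompose(comm.rank(), assigner, create);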
struct AddMultiImageBlock
{
std::vector<vtkm::rendering::compositing::Image>& m_images;
vtkm::rendering::compositing::Image& m_output;
const vtkmdiy::Master& m_master;
AddMultiImageBlock(vtkmdiy::Master& master,
std::vector<vtkm::rendering::compositing::Image>& images,
vtkm::rendering::compositing::Image& output)
: m_images(images)
, m_output(output)
, m_master(master)
{
}
template <typename BoundsType, typename LinkType>
void operator()(int gid,
const BoundsType&, // local_bounds
const BoundsType&, // local_with_ghost_bounds
const BoundsType&, // domain_bounds
const LinkType& link) const
{
MultiImageBlock* block = new MultiImageBlock(m_images, m_output);
LinkType* linked = new LinkType(link);
vtkmdiy::Master& master = const_cast<vtkmdiy::Master&>(m_master);
master.add(gid, block, linked);
}
};
}
}
} //namespace vtkm::rendering::compositing
namespace vtkmdiy
{
template <>
struct Serialization<vtkm::rendering::compositing::PayloadImage>
{
static void save(BinaryBuffer& bb, const vtkm::rendering::compositing::PayloadImage& image)
{
vtkmdiy::save(bb, image.m_orig_bounds.X.Min);
vtkmdiy::save(bb, image.m_orig_bounds.Y.Min);
vtkmdiy::save(bb, image.m_orig_bounds.Z.Min);
vtkmdiy::save(bb, image.m_orig_bounds.X.Max);
vtkmdiy::save(bb, image.m_orig_bounds.Y.Max);
vtkmdiy::save(bb, image.m_orig_bounds.Z.Max);
vtkmdiy::save(bb, image.m_bounds.X.Min);
vtkmdiy::save(bb, image.m_bounds.Y.Min);
vtkmdiy::save(bb, image.m_bounds.Z.Min);
vtkmdiy::save(bb, image.m_bounds.X.Max);
vtkmdiy::save(bb, image.m_bounds.Y.Max);
vtkmdiy::save(bb, image.m_bounds.Z.Max);
vtkmdiy::save(bb, image.m_payloads);
vtkmdiy::save(bb, image.m_payload_bytes);
vtkmdiy::save(bb, image.m_depths);
vtkmdiy::save(bb, image.m_orig_rank);
}
static void load(BinaryBuffer& bb, vtkm::rendering::compositing::PayloadImage& image)
{
vtkmdiy::load(bb, image.m_orig_bounds.X.Min);
vtkmdiy::load(bb, image.m_orig_bounds.Y.Min);
vtkmdiy::load(bb, image.m_orig_bounds.Z.Min);
vtkmdiy::load(bb, image.m_orig_bounds.X.Max);
vtkmdiy::load(bb, image.m_orig_bounds.Y.Max);
vtkmdiy::load(bb, image.m_orig_bounds.Z.Max);
vtkmdiy::load(bb, image.m_bounds.X.Min);
vtkmdiy::load(bb, image.m_bounds.Y.Min);
vtkmdiy::load(bb, image.m_bounds.Z.Min);
vtkmdiy::load(bb, image.m_bounds.X.Max);
vtkmdiy::load(bb, image.m_bounds.Y.Max);
vtkmdiy::load(bb, image.m_bounds.Z.Max);
vtkmdiy::load(bb, image.m_payloads);
vtkmdiy::load(bb, image.m_payload_bytes);
vtkmdiy::load(bb, image.m_depths);
vtkmdiy::load(bb, image.m_orig_rank);
}
};
template <>
struct Serialization<vtkm::rendering::compositing::Image>
{
static void save(BinaryBuffer& bb, const vtkm::rendering::compositing::Image& image)
{
vtkmdiy::save(bb, image.m_orig_bounds.X.Min);
vtkmdiy::save(bb, image.m_orig_bounds.Y.Min);
vtkmdiy::save(bb, image.m_orig_bounds.Z.Min);
vtkmdiy::save(bb, image.m_orig_bounds.X.Max);
vtkmdiy::save(bb, image.m_orig_bounds.Y.Max);
vtkmdiy::save(bb, image.m_orig_bounds.Z.Max);
vtkmdiy::save(bb, image.m_bounds.X.Min);
vtkmdiy::save(bb, image.m_bounds.Y.Min);
vtkmdiy::save(bb, image.m_bounds.Z.Min);
vtkmdiy::save(bb, image.m_bounds.X.Max);
vtkmdiy::save(bb, image.m_bounds.Y.Max);
vtkmdiy::save(bb, image.m_bounds.Z.Max);
vtkmdiy::save(bb, image.m_pixels);
vtkmdiy::save(bb, image.m_depths);
vtkmdiy::save(bb, image.m_orig_rank);
vtkmdiy::save(bb, image.m_composite_order);
}
static void load(BinaryBuffer& bb, vtkm::rendering::compositing::Image& image)
{
vtkmdiy::load(bb, image.m_orig_bounds.X.Min);
vtkmdiy::load(bb, image.m_orig_bounds.Y.Min);
vtkmdiy::load(bb, image.m_orig_bounds.Z.Min);
vtkmdiy::load(bb, image.m_orig_bounds.X.Max);
vtkmdiy::load(bb, image.m_orig_bounds.Y.Max);
vtkmdiy::load(bb, image.m_orig_bounds.Z.Max);
vtkmdiy::load(bb, image.m_bounds.X.Min);
vtkmdiy::load(bb, image.m_bounds.Y.Min);
vtkmdiy::load(bb, image.m_bounds.Z.Min);
vtkmdiy::load(bb, image.m_bounds.X.Max);
vtkmdiy::load(bb, image.m_bounds.Y.Max);
vtkmdiy::load(bb, image.m_bounds.Z.Max);
vtkmdiy::load(bb, image.m_pixels);
vtkmdiy::load(bb, image.m_depths);
vtkmdiy::load(bb, image.m_orig_rank);
vtkmdiy::load(bb, image.m_composite_order);
}
};
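// DIY resolves these Serialization specializations whenever images move
// between blocks: proxy.enqueue(dest, image) invokes
// Serialization<Image>::save, and proxy.dequeue(gid, image) invokes load.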
} //namespace vtkmdiy
#endif

@ -0,0 +1,216 @@
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~//
// Copyright (c) 2018, Lawrence Livermore National Security, LLC.
//
// Produced at the Lawrence Livermore National Laboratory
//
// LLNL-CODE-749865
//
// All rights reserved.
//
// This file is part of Rover.
//
// Please also read rover/LICENSE
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the disclaimer below.
//
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the disclaimer (as noted below) in the
// documentation and/or other materials provided with the distribution.
//
// * Neither the name of the LLNS/LLNL nor the names of its contributors may
// be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY,
// LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
// OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
// IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~//
#ifndef rover_blocks_h
#define rover_blocks_h
#include <diy/master.hpp>
#include "AbsorptionPartial.hpp"
#include "EmissionPartial.hpp"
#include "VolumePartial.hpp"
namespace vtkh
{
//--------------------------------------Volume Block Structure-----------------------------------
template <typename FloatType>
struct VolumeBlock
{
typedef vtkhdiy::DiscreteBounds Bounds;
typedef VolumePartial<FloatType> PartialType;
std::vector<VolumePartial<FloatType>>& m_partials;
VolumeBlock(std::vector<VolumePartial<FloatType>>& partials)
: m_partials(partials)
{
}
};
//--------------------------------------Absorption Block Structure------------------------------
template <typename FloatType>
struct AbsorptionBlock
{
typedef vtkhdiy::DiscreteBounds Bounds;
typedef AbsorptionPartial<FloatType> PartialType;
std::vector<AbsorptionPartial<FloatType>>& m_partials;
AbsorptionBlock(std::vector<AbsorptionPartial<FloatType>>& partials)
: m_partials(partials)
{
}
};
//--------------------------------------Emission Block Structure------------------------------
template <typename FloatType>
struct EmissionBlock
{
typedef vtkhdiy::DiscreteBounds Bounds;
typedef EmissionPartial<FloatType> PartialType;
std::vector<EmissionPartial<FloatType>>& m_partials;
EmissionBlock(std::vector<EmissionPartial<FloatType>>& partials)
: m_partials(partials)
{
}
};
//--------------------------------------Add Block Template-----------------------------------
template <typename BlockType>
struct AddBlock
{
typedef typename BlockType::PartialType PartialType;
typedef BlockType Block;
std::vector<PartialType>& m_partials;
const vtkhdiy::Master& m_master;
AddBlock(vtkhdiy::Master& master, std::vector<PartialType>& partials)
: m_partials(partials)
, m_master(master)
{
}
template <typename BoundsType, typename LinkType>
void operator()(int gid,
const BoundsType& local_bounds,
const BoundsType& local_with_ghost_bounds,
const BoundsType& domain_bounds,
const LinkType& link) const
{
(void)local_bounds;
(void)domain_bounds;
(void)local_with_ghost_bounds;
Block* block = new Block(m_partials);
LinkType* rg_link = new LinkType(link);
vtkhdiy::Master& master = const_cast<vtkhdiy::Master&>(m_master);
int lid = master.add(gid, block, rg_link);
(void)lid;
}
};
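// Like the image blocks above, AddBlock is passed to
// RegularDecomposer::decompose to register one block of partials per rank
// (sketch; master/assigner/decomposer setup omitted):
//
//   AddBlock<VolumeBlock<float>> create(master, partials);
//   decomposer.decompose(world.rank(), assigner, create);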
} //namespace vtkh
//-------------------------------Serialization Specializations--------------------------------
namespace vtkhdiy
{
template <>
struct Serialization<vtkh::AbsorptionPartial<double>>
{
static void save(BinaryBuffer& bb, const vtkh::AbsorptionPartial<double>& partial)
{
vtkhdiy::save(bb, partial.m_bins);
vtkhdiy::save(bb, partial.m_pixel_id);
vtkhdiy::save(bb, partial.m_depth);
}
static void load(BinaryBuffer& bb, vtkh::AbsorptionPartial<double>& partial)
{
vtkhdiy::load(bb, partial.m_bins);
vtkhdiy::load(bb, partial.m_pixel_id);
vtkhdiy::load(bb, partial.m_depth);
}
};
template <>
struct Serialization<vtkh::AbsorptionPartial<float>>
{
static void save(BinaryBuffer& bb, const vtkh::AbsorptionPartial<float>& partial)
{
vtkhdiy::save(bb, partial.m_bins);
vtkhdiy::save(bb, partial.m_pixel_id);
vtkhdiy::save(bb, partial.m_depth);
}
static void load(BinaryBuffer& bb, vtkh::AbsorptionPartial<float>& partial)
{
vtkhdiy::load(bb, partial.m_bins);
vtkhdiy::load(bb, partial.m_pixel_id);
vtkhdiy::load(bb, partial.m_depth);
}
};
template <>
struct Serialization<vtkh::EmissionPartial<double>>
{
static void save(BinaryBuffer& bb, const vtkh::EmissionPartial<double>& partial)
{
vtkhdiy::save(bb, partial.m_bins);
vtkhdiy::save(bb, partial.m_emission_bins);
vtkhdiy::save(bb, partial.m_pixel_id);
vtkhdiy::save(bb, partial.m_depth);
}
static void load(BinaryBuffer& bb, vtkh::EmissionPartial<double>& partial)
{
vtkhdiy::load(bb, partial.m_bins);
vtkhdiy::load(bb, partial.m_emission_bins);
vtkhdiy::load(bb, partial.m_pixel_id);
vtkhdiy::load(bb, partial.m_depth);
}
};
template <>
struct Serialization<vtkh::EmissionPartial<float>>
{
static void save(BinaryBuffer& bb, const vtkh::EmissionPartial<float>& partial)
{
vtkhdiy::save(bb, partial.m_bins);
vtkhdiy::save(bb, partial.m_emission_bins);
vtkhdiy::save(bb, partial.m_pixel_id);
vtkhdiy::save(bb, partial.m_depth);
}
static void load(BinaryBuffer& bb, vtkh::EmissionPartial<float>& partial)
{
vtkhdiy::load(bb, partial.m_bins);
vtkhdiy::load(bb, partial.m_emission_bins);
vtkhdiy::load(bb, partial.m_pixel_id);
vtkhdiy::load(bb, partial.m_depth);
}
};
} // namespace vtkhdiy
#endif

@ -0,0 +1,185 @@
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~//
// Copyright (c) 2018, Lawrence Livermore National Security, LLC.
//
// Produced at the Lawrence Livermore National Laboratory
//
// LLNL-CODE-749865
//
// All rights reserved.
//
// This file is part of Rover.
//
// Please also read rover/LICENSE
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the disclaimer below.
//
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the disclaimer (as noted below) in the
// documentation and/or other materials provided with the distribution.
//
// * Neither the name of the LLNS/LLNL nor the names of its contributors may
// be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY,
// LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
// OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
// IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~//
#ifndef rover_compositing_collect_h
#define rover_compositing_collect_h
#include "AbsorptionPartial.hpp"
#include "EmissionPartial.hpp"
#include "VolumePartial.hpp"
#include <diy/assigner.hpp>
#include <diy/decomposition.hpp>
#include <diy/master.hpp>
#include <diy/reduce-operations.hpp>
namespace vtkh
{
//
// Collect struct sends all data to a single node.
//
template <typename BlockType>
struct Collect
{
const vtkhdiy::RegularDecomposer<vtkhdiy::ContinuousBounds>& m_decomposer;
Collect(const vtkhdiy::RegularDecomposer<vtkhdiy::ContinuousBounds>& decomposer)
: m_decomposer(decomposer)
{
}
void operator()(void* v_block, const vtkhdiy::ReduceProxy& proxy) const
{
BlockType* block = static_cast<BlockType*>(v_block);
//
// On the first round there is nothing incoming. Take the partials we have
// and send them to the collection rank.
//
const int collection_rank = 0;
if (proxy.in_link().size() == 0 && proxy.gid() != collection_rank)
{
int dest_gid = collection_rank;
vtkhdiy::BlockID dest = proxy.out_link().target(dest_gid);
proxy.enqueue(dest, block->m_partials);
block->m_partials.clear();
} // if
else if (proxy.gid() == collection_rank)
{
for (int i = 0; i < proxy.in_link().size(); ++i)
{
int gid = proxy.in_link().target(i).gid;
if (gid == collection_rank)
{
continue;
}
//TODO: leave the partials that start here, here
std::vector<typename BlockType::PartialType> incoming_partials;
proxy.dequeue(gid, incoming_partials);
const int incoming_size = incoming_partials.size();
// TODO: make this a std::copy
for (int j = 0; j < incoming_size; ++j)
{
block->m_partials.push_back(incoming_partials[j]);
}
} // for
} // else
} // operator
};
//
// collect uses the all-to-all construct to perform a gather to
// the root rank. All other ranks will have no data
//
template <typename AddBlockType>
void collect_detail(std::vector<typename AddBlockType::PartialType>& partials, MPI_Comm comm)
{
typedef typename AddBlockType::Block Block;
vtkhdiy::mpi::communicator world(comm);
vtkhdiy::ContinuousBounds global_bounds;
global_bounds.min[0] = 0;
global_bounds.max[0] = 1;
// tells diy to use all available threads
const int num_threads = -1;
const int num_blocks = world.size();
const int magic_k = 2;
vtkhdiy::Master master(world, num_threads);
// create an assigner with one block per rank
vtkhdiy::ContiguousAssigner assigner(num_blocks, num_blocks);
AddBlockType create(master, partials);
const int dims = 1;
vtkhdiy::RegularDecomposer<vtkhdiy::ContinuousBounds> decomposer(dims, global_bounds, num_blocks);
decomposer.decompose(world.rank(), assigner, create);
vtkhdiy::all_to_all(master, assigner, Collect<Block>(decomposer), magic_k);
}
template <typename T>
void collect(std::vector<T>& partials, MPI_Comm comm);
template <>
void collect<VolumePartial<float>>(std::vector<VolumePartial<float>>& partials, MPI_Comm comm)
{
collect_detail<AddBlock<VolumeBlock<float>>>(partials, comm);
}
template <>
void collect<VolumePartial<double>>(std::vector<VolumePartial<double>>& partials, MPI_Comm comm)
{
collect_detail<AddBlock<VolumeBlock<double>>>(partials, comm);
}
template <>
void collect<AbsorptionPartial<double>>(std::vector<AbsorptionPartial<double>>& partials,
MPI_Comm comm)
{
collect_detail<AddBlock<AbsorptionBlock<double>>>(partials, comm);
}
template <>
void collect<AbsorptionPartial<float>>(std::vector<AbsorptionPartial<float>>& partials,
MPI_Comm comm)
{
collect_detail<AddBlock<AbsorptionBlock<float>>>(partials, comm);
}
template <>
void collect<EmissionPartial<double>>(std::vector<EmissionPartial<double>>& partials, MPI_Comm comm)
{
collect_detail<AddBlock<EmissionBlock<double>>>(partials, comm);
}
template <>
void collect<EmissionPartial<float>>(std::vector<EmissionPartial<float>>& partials, MPI_Comm comm)
{
collect_detail<AddBlock<EmissionBlock<float>>>(partials, comm);
}
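// Example (sketch): gather all volume partials onto rank 0; after the call
// rank 0 holds every partial and the other ranks hold none.
//
//   std::vector<VolumePartial<float>> partials = /* rank-local partials */;
//   vtkh::collect(partials, MPI_COMM_WORLD);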
} // namespace vtkh
#endif

@ -0,0 +1,226 @@
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~//
// Copyright (c) 2018, Lawrence Livermore National Security, LLC.
//
// Produced at the Lawrence Livermore National Laboratory
//
// LLNL-CODE-749865
//
// All rights reserved.
//
// This file is part of Rover.
//
// Please also read rover/LICENSE
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the disclaimer below.
//
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the disclaimer (as noted below) in the
// documentation and/or other materials provided with the distribution.
//
// * Neither the name of the LLNS/LLNL nor the names of its contributors may
// be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY,
// LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
// OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
// IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~//
#ifndef rover_compositing_redistribute_h
#define rover_compositing_redistribute_h
#include "vtkh_diy_partial_blocks.hpp"
#include <diy/assigner.hpp>
#include <diy/decomposition.hpp>
#include <diy/master.hpp>
#include <diy/reduce-operations.hpp>
#include <map>
namespace vtkh
{
//
// Redistributes partial composites to the rank that owns
// that section of the image. Currently, the domain is decomposed
// in 1-D from min_pixel to max_pixel.
//
template <typename BlockType>
struct Redistribute
{
const vtkhdiy::RegularDecomposer<vtkhdiy::DiscreteBounds>& m_decomposer;
Redistribute(const vtkhdiy::RegularDecomposer<vtkhdiy::DiscreteBounds>& decomposer)
: m_decomposer(decomposer)
{
}
void operator()(void* v_block, const vtkhdiy::ReduceProxy& proxy) const
{
BlockType* block = static_cast<BlockType*>(v_block);
//
// On the first round there is nothing incoming. Take the partials we have
// and send them to the right rank.
//
if (proxy.in_link().size() == 0)
{
const int size = block->m_partials.size();
std::map<vtkhdiy::BlockID, std::vector<typename BlockType::PartialType>> outgoing;
for (int i = 0; i < size; ++i)
{
vtkhdiy::Point<int, DIY_MAX_DIM> point;
point[0] = block->m_partials[i].m_pixel_id;
int dest_gid = m_decomposer.point_to_gid(point);
vtkhdiy::BlockID dest = proxy.out_link().target(dest_gid);
outgoing[dest].push_back(block->m_partials[i]);
} //for
block->m_partials.clear();
for (int i = 0; i < proxy.out_link().size(); ++i)
{
int dest_gid = proxy.out_link().target(i).gid;
vtkhdiy::BlockID dest = proxy.out_link().target(dest_gid);
proxy.enqueue(dest, outgoing[dest]);
//outgoing[dest].clear();
}
} // if
else
{
for (int i = 0; i < proxy.in_link().size(); ++i)
{
int gid = proxy.in_link().target(i).gid;
std::vector<typename BlockType::PartialType> incoming_partials;
proxy.dequeue(gid, incoming_partials);
const int incoming_size = incoming_partials.size();
// TODO: make this a std::copy
for (int j = 0; j < incoming_size; ++j)
{
block->m_partials.push_back(incoming_partials[j]);
}
} // for
} // else
MPI_Barrier(MPI_COMM_WORLD); //HACK
} // operator
};
template <typename AddBlockType>
void redistribute_detail(std::vector<typename AddBlockType::PartialType>& partials,
MPI_Comm comm,
const int& domain_min_pixel,
const int& domain_max_pixel)
{
typedef typename AddBlockType::Block Block;
vtkhdiy::mpi::communicator world(comm);
vtkhdiy::DiscreteBounds global_bounds;
global_bounds.min[0] = domain_min_pixel;
global_bounds.max[0] = domain_max_pixel;
// tell diy to use a single thread
const int num_threads = 1;
const int num_blocks = world.size();
const int magic_k = 2;
vtkhdiy::Master master(world, num_threads);
// create an assigner with one block per rank
vtkhdiy::ContiguousAssigner assigner(num_blocks, num_blocks);
AddBlockType create(master, partials);
const int dims = 1;
vtkhdiy::RegularDecomposer<vtkhdiy::DiscreteBounds> decomposer(dims, global_bounds, num_blocks);
decomposer.decompose(world.rank(), assigner, create);
vtkhdiy::all_to_all(master, assigner, Redistribute<Block>(decomposer), magic_k);
}
//
// Define a default template that cannot be instantiated
//
template <typename T>
void redistribute(std::vector<T>& partials,
MPI_Comm comm,
const int& domain_min_pixel,
const int& domain_max_pixel);
// ----------------------------- VolumePartial Specialization------------------------------------------
template <>
void redistribute<VolumePartial<float>>(std::vector<VolumePartial<float>>& partials,
MPI_Comm comm,
const int& domain_min_pixel,
const int& domain_max_pixel)
{
redistribute_detail<AddBlock<VolumeBlock<float>>>(
partials, comm, domain_min_pixel, domain_max_pixel);
}
template <>
void redistribute<VolumePartial<double>>(std::vector<VolumePartial<double>>& partials,
MPI_Comm comm,
const int& domain_min_pixel,
const int& domain_max_pixel)
{
redistribute_detail<AddBlock<VolumeBlock<double>>>(
partials, comm, domain_min_pixel, domain_max_pixel);
}
// ----------------------------- AbsorpPartial Specialization------------------------------------------
template <>
void redistribute<AbsorptionPartial<double>>(std::vector<AbsorptionPartial<double>>& partials,
MPI_Comm comm,
const int& domain_min_pixel,
const int& domain_max_pixel)
{
redistribute_detail<AddBlock<AbsorptionBlock<double>>>(
partials, comm, domain_min_pixel, domain_max_pixel);
}
template <>
void redistribute<AbsorptionPartial<float>>(std::vector<AbsorptionPartial<float>>& partials,
MPI_Comm comm,
const int& domain_min_pixel,
const int& domain_max_pixel)
{
redistribute_detail<AddBlock<AbsorptionBlock<float>>>(
partials, comm, domain_min_pixel, domain_max_pixel);
}
// ----------------------------- EmissPartial Specialization------------------------------------------
template <>
void redistribute<EmissionPartial<double>>(std::vector<EmissionPartial<double>>& partials,
MPI_Comm comm,
const int& domain_min_pixel,
const int& domain_max_pixel)
{
redistribute_detail<AddBlock<EmissionBlock<double>>>(
partials, comm, domain_min_pixel, domain_max_pixel);
}
template <>
void redistribute<EmissionPartial<float>>(std::vector<EmissionPartial<float>>& partials,
MPI_Comm comm,
const int& domain_min_pixel,
const int& domain_max_pixel)
{
redistribute_detail<AddBlock<EmissionBlock<float>>>(
partials, comm, domain_min_pixel, domain_max_pixel);
}
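// Example (sketch): move each partial to the rank that owns its pixel id,
// given an image of width x height pixels decomposed in 1-D:
//
//   vtkh::redistribute(partials, MPI_COMM_WORLD, 0, width * height - 1);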
} //namespace vtkh
#endif

@ -0,0 +1,49 @@
#ifndef VTKH_DIY_UTILS_HPP
#define VTKH_DIY_UTILS_HPP
#include <vtkm/Bounds.h>
#include <vtkmdiy/decomposition.hpp>
namespace vtkh
{
static vtkm::Bounds DIYBoundsToVTKM(const vtkmdiy::DiscreteBounds& bounds)
{
vtkm::Bounds vtkm_bounds;
vtkm_bounds.X.Min = bounds.min[0];
vtkm_bounds.Y.Min = bounds.min[1];
vtkm_bounds.Z.Min = bounds.min[2];
vtkm_bounds.X.Max = bounds.max[0];
vtkm_bounds.Y.Max = bounds.max[1];
vtkm_bounds.Z.Max = bounds.max[2];
return vtkm_bounds;
}
static vtkmdiy::DiscreteBounds VTKMBoundsToDIY(const vtkm::Bounds& bounds)
{
vtkmdiy::DiscreteBounds diy_bounds(3);
diy_bounds.min[0] = bounds.X.Min;
diy_bounds.min[1] = bounds.Y.Min;
diy_bounds.max[0] = bounds.X.Max;
diy_bounds.max[1] = bounds.Y.Max;
if (bounds.Z.IsNonEmpty())
{
diy_bounds.min[2] = bounds.Z.Min;
diy_bounds.max[2] = bounds.Z.Max;
}
else
{
diy_bounds.min[2] = 0;
diy_bounds.max[2] = 0;
}
return diy_bounds;
}
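// Note: VTKMBoundsToDIY collapses an empty Z range to [0, 0], so converting
// 2D bounds to DIY and back yields Z = [0, 0] rather than an empty range.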
} //namespace vtkh
#endif

@ -26,3 +26,16 @@ set(unit_tests
)
vtkm_unit_tests(SOURCES ${unit_tests})
# Add distributed tests, i.e., tests that run with MPI,
# if MPI is enabled.
if (VTKm_ENABLE_MPI)
set(mpi_unit_tests
UnitTestImageCompositing.cxx
)
vtkm_unit_tests(
MPI
DEVICE_SOURCES ${mpi_unit_tests}
USE_VTKM_JOB_POOL
)
endif()

@ -0,0 +1,199 @@
//============================================================================
// Copyright (c) Kitware, Inc.
// All rights reserved.
// See LICENSE.txt for details.
//
// This software is distributed WITHOUT ANY WARRANTY; without even
// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the above copyright notice for more information.
//============================================================================
#include <vtkm/cont/testing/Testing.h>
#include <vtkm/rendering/compositing/Compositor.h>
#include <vtkm/rendering/compositing/Image.h>
#include <vtkm/rendering/compositing/ImageCompositor.h>
#include <vtkm/rendering/testing/t_vtkm_test_utils.h>
#include <vtkm/rendering/Actor.h>
#include <vtkm/rendering/CanvasRayTracer.h>
#include <vtkm/rendering/MapperRayTracer.h>
#include <vtkm/rendering/MapperVolume.h>
#include <vtkm/rendering/MapperWireframer.h>
#include <vtkm/rendering/Scene.h>
#include <vtkm/rendering/View3D.h>
#include <vtkm/rendering/testing/RenderTest.h>
#include <vtkm/io/VTKDataSetReader.h>
namespace
{
template <typename T>
T* GetVTKMPointer(vtkm::cont::ArrayHandle<T>& handle)
{
return handle.WritePortal().GetArray();
}
vtkm::cont::DataSet ReadDS(int rank)
{
(void)rank;
std::string vtkFile;
vtkm::io::VTKDataSetReader reader(vtkFile);
return reader.ReadDataSet();
}
vtkm::rendering::compositing::Image ConstImage(const std::size_t& width,
const std::size_t& height,
const vtkm::Vec4f& rgba,
const vtkm::FloatDefault& depth)
{
auto numPix = width * height;
std::vector<vtkm::FloatDefault> rgbaVals(numPix * 4);
std::vector<vtkm::FloatDefault> depthVals(numPix, depth);
for (std::size_t i = 0; i < numPix; i++)
{
rgbaVals[i * 4 + 0] = rgba[0];
rgbaVals[i * 4 + 1] = rgba[1];
rgbaVals[i * 4 + 2] = rgba[2];
rgbaVals[i * 4 + 3] = rgba[3];
}
vtkm::rendering::compositing::Image img(vtkm::Bounds(0, width, 0, height, 0, 1));
img.Init(rgbaVals.data(), depthVals.data(), width, height);
return img;
}
void TestImageComposite()
{
auto comm = vtkm::cont::EnvironmentTracker::GetCommunicator();
std::size_t width = 4, height = 4;
// Each rank contributes one constant image: rank 0 a red image at depth 1.0,
// the other ranks a cyan image at depth 0.5.
vtkm::rendering::compositing::Compositor compositor;
compositor.SetCompositeMode(vtkm::rendering::compositing::Compositor::Z_BUFFER_SURFACE);
vtkm::rendering::compositing::Image img;
if (comm.rank() == 0)
img = ConstImage(width, height, { 1, 0, 0, 1 }, 1.0);
else
img = ConstImage(width, height, { 0, 1, 1, .5 }, 0.5);
compositor.AddImage(img.m_pixels.data(), img.m_depths.data(), width, height);
auto res = compositor.Composite();
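// With Z_BUFFER_SURFACE compositing, the image with the smaller depth
// (0.5, from the non-zero ranks) should win at every pixel of the result.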
//vtkm::rendering::compositing::ImageCompositor imgCompositor;
//compositor.ZBufferComposite(res, img);
//compositor.Blend(res, img);
if (comm.rank() == 0)
{
for (std::size_t i = 0; i < width * height; i++)
{
std::cout << i << ": ";
std::cout << (int)res.m_pixels[i * 4 + 0] << " ";
std::cout << (int)res.m_pixels[i * 4 + 1] << " ";
std::cout << (int)res.m_pixels[i * 4 + 2] << " ";
std::cout << (int)res.m_pixels[i * 4 + 3] << " ";
std::cout << res.m_depths[i] << std::endl;
}
}
}
void TestRenderComposite()
{
using vtkm::rendering::CanvasRayTracer;
using vtkm::rendering::MapperRayTracer;
using vtkm::rendering::MapperVolume;
using vtkm::rendering::MapperWireframer;
auto comm = vtkm::cont::EnvironmentTracker::GetCommunicator();
int numBlocks = comm.size() * 1;
int rank = comm.rank();
std::string fieldName = "tangle";
std::string fname = "";
if (comm.rank() == 0)
fname = "/home/dpn/tangle0.vtk";
else
fname = "/home/dpn/tangle1.vtk";
vtkm::io::VTKDataSetReader reader(fname);
auto ds = reader.ReadDataSet();
ds.PrintSummary(std::cout);
/*
vtkm::source::Tangle tangle;
tangle.SetPointDimensions({ 50, 50, 50 });
vtkm::cont::DataSet ds = tangle.Execute();
*/
//auto ds = CreateTestData(rank, numBlocks, 32);
//auto fieldName = "point_data_Float32";
/*
vtkm::rendering::testing::RenderTestOptions options;
options.Mapper = vtkm::rendering::testing::MapperType::RayTracer;
options.AllowAnyDevice = false;
options.ColorTable = vtkm::cont::ColorTable::Preset::Inferno;
vtkm::rendering::testing::RenderTest(ds, "point_data_Float32", "rendering/raytracer/regular3D.png", options);
*/
vtkm::rendering::Camera camera;
camera.SetLookAt(vtkm::Vec3f_32(1.0, 0.5, 0.5));
camera.SetViewUp(vtkm::make_Vec(0.f, 1.f, 0.f));
camera.SetClippingRange(1.f, 10.f);
camera.SetFieldOfView(60.f);
camera.SetPosition(vtkm::Vec3f_32(3, 3, 3));
vtkm::cont::ColorTable colorTable("inferno");
// Background color:
vtkm::rendering::Color bg(0.2f, 0.2f, 0.2f, 1.0f);
vtkm::rendering::Actor actor(
ds.GetCellSet(), ds.GetCoordinateSystem(), ds.GetField(fieldName), colorTable);
vtkm::rendering::Scene scene;
scene.AddActor(actor);
int width = 512, height = 512;
CanvasRayTracer canvas(width, height);
vtkm::rendering::View3D view(scene, MapperVolume(), canvas, camera, bg);
view.Paint();
if (comm.rank() == 0)
view.SaveAs("volume0.png");
else
view.SaveAs("volume1.png");
auto colors = &GetVTKMPointer(canvas.GetColorBuffer())[0][0];
auto depths = GetVTKMPointer(canvas.GetDepthBuffer());
vtkm::rendering::compositing::Compositor compositor;
compositor.AddImage(colors, depths, width, height);
auto res = compositor.Composite();
res.Save("RESULT.png", { "" });
}
void RenderTests()
{
// TestImageComposite();
TestRenderComposite();
}
} //namespace
int UnitTestImageCompositing(int argc, char* argv[])
{
return vtkm::cont::testing::Testing::Run(RenderTests, argc, argv);
}

@ -0,0 +1,773 @@
#ifndef t_vtkm_test_utils_h
#define t_vtkm_test_utils_h
#include <assert.h>
#include <random>
#include <vtkm/Matrix.h>
#include <vtkm/VectorAnalysis.h>
#include <vtkm/cont/DataSet.h>
#include <vtkm/cont/DataSetBuilderExplicit.h>
#include <vtkm/cont/DataSetBuilderRectilinear.h>
#include <vtkm/cont/DataSetBuilderUniform.h>
//#include <vtkm/cont/testing/Testing.h>
#define BASE_SIZE 32
typedef vtkm::cont::ArrayHandleUniformPointCoordinates UniformCoords;
struct SpatialDivision
{
int m_mins[3];
int m_maxs[3];
SpatialDivision()
: m_mins{ 0, 0, 0 }
, m_maxs{ 1, 1, 1 }
{
}
bool CanSplit(int dim) { return m_maxs[dim] - m_mins[dim] + 1 > 1; }
SpatialDivision Split(int dim)
{
SpatialDivision r_split;
r_split = *this;
assert(CanSplit(dim));
int size = m_maxs[dim] - m_mins[dim] + 1;
int left_offset = size / 2;
//shrink the left side
m_maxs[dim] = m_mins[dim] + left_offset - 1;
//shrink the right side
r_split.m_mins[dim] = m_maxs[dim] + 1;
return r_split;
}
};
SpatialDivision GetBlock(int block, int num_blocks, SpatialDivision total_size)
{
std::vector<SpatialDivision> divs;
divs.push_back(total_size);
int avail = num_blocks - 1;
int current_dim = 0;
int missed_splits = 0;
const int num_dims = 3;
while (avail > 0)
{
const int current_size = divs.size();
int temp_avail = avail;
for (int i = 0; i < current_size; ++i)
{
if (avail == 0)
break;
if (!divs[i].CanSplit(current_dim))
{
continue;
}
divs.push_back(divs[i].Split(current_dim));
--avail;
}
if (temp_avail == avail)
{
// dims were too small to make any split
missed_splits++;
if (missed_splits == 3)
{
// we tried all three dims and could
// not make a split.
for (int i = 0; i < avail; ++i)
{
SpatialDivision empty;
empty.m_maxs[0] = 0;
empty.m_maxs[1] = 0;
empty.m_maxs[2] = 0;
divs.push_back(empty);
}
if (block == 0)
{
std::cerr << "** Warning **: data set size is too small to"
<< " divide between " << num_blocks << " blocks. "
<< " Adding " << avail << " empty data sets\n";
}
avail = 0;
}
}
else
{
missed_splits = 0;
}
current_dim = (current_dim + 1) % num_dims;
}
return divs.at(block);
}
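// Example: with num_blocks == 2 and a total size spanning [0, 63]^3,
// GetBlock splits dimension 0 once, so block 0 covers x in [0, 31] and
// block 1 covers x in [32, 63].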
template <typename FieldType>
vtkm::cont::Field CreateCellScalarField(int size, const char* fieldName)
{
vtkm::cont::ArrayHandle<FieldType> data;
data.Allocate(size);
for (int i = 0; i < size; ++i)
{
FieldType val = i / vtkm::Float32(size);
data.WritePortal().Set(i, val);
}
vtkm::cont::Field field(fieldName, vtkm::cont::Field::Association::Cells, data);
return field;
}
vtkm::cont::Field CreateGhostScalarField(vtkm::Id3 dims)
{
vtkm::Int32 size = dims[0] * dims[1] * dims[2];
vtkm::cont::ArrayHandle<vtkm::Int32> data;
data.Allocate(size);
for (int z = 0; z < dims[2]; ++z)
for (int y = 0; y < dims[1]; ++y)
for (int x = 0; x < dims[0]; ++x)
{
vtkm::UInt8 flag = 0;
if (x < 1 || x > dims[0] - 2)
flag = 1;
if (y < 1 || y > dims[1] - 2)
flag = 1;
if (z < 1 || z > dims[2] - 2)
flag = 1;
vtkm::Id index = z * dims[0] * dims[1] + y * dims[0] + x;
data.WritePortal().Set(index, flag);
}
vtkm::cont::Field field("ghosts", vtkm::cont::Field::Association::Cells, data);
return field;
}
template <typename FieldType>
vtkm::cont::Field CreatePointScalarField(UniformCoords coords, const char* fieldName)
{
const int size = coords.GetNumberOfValues();
vtkm::cont::ArrayHandle<FieldType> data;
data.Allocate(size);
auto portal = coords.ReadPortal();
for (int i = 0; i < size; ++i)
{
vtkm::Vec<FieldType, 3> point = portal.Get(i);
FieldType val = vtkm::Magnitude(point) + 1.f;
data.WritePortal().Set(i, val);
}
vtkm::cont::Field field(fieldName, vtkm::cont::Field::Association::Points, data);
return field;
}
template <typename FieldType>
vtkm::cont::Field CreatePointVecField(int size, const char* fieldName)
{
vtkm::cont::ArrayHandle<vtkm::Vec<FieldType, 3>> data;
data.Allocate(size);
for (int i = 0; i < size; ++i)
{
FieldType val = i / FieldType(size);
vtkm::Vec<FieldType, 3> vec(val, -val, val);
data.WritePortal().Set(i, vec);
}
vtkm::cont::Field field(fieldName, vtkm::cont::Field::Association::Points, data);
return field;
}
vtkm::cont::DataSet CreateTestData(int block, int num_blocks, int base_size)
{
SpatialDivision mesh_size;
mesh_size.m_mins[0] = 0;
mesh_size.m_mins[1] = 0;
mesh_size.m_mins[2] = 0;
mesh_size.m_maxs[0] = num_blocks * base_size - 1;
mesh_size.m_maxs[1] = num_blocks * base_size - 1;
mesh_size.m_maxs[2] = num_blocks * base_size - 1;
SpatialDivision local_block = GetBlock(block, num_blocks, mesh_size);
vtkm::Vec<vtkm::Float32, 3> origin;
origin[0] = local_block.m_mins[0];
origin[1] = local_block.m_mins[1];
origin[2] = local_block.m_mins[2];
vtkm::Vec<vtkm::Float32, 3> spacing(1.f, 1.f, 1.f);
vtkm::Id3 point_dims;
point_dims[0] = local_block.m_maxs[0] - local_block.m_mins[0] + 2;
point_dims[1] = local_block.m_maxs[1] - local_block.m_mins[1] + 2;
point_dims[2] = local_block.m_maxs[2] - local_block.m_mins[2] + 2;
vtkm::Id3 cell_dims;
cell_dims[0] = point_dims[0] - 1;
cell_dims[1] = point_dims[1] - 1;
cell_dims[2] = point_dims[2] - 1;
vtkm::cont::DataSet data_set;
UniformCoords point_handle(point_dims, origin, spacing);
vtkm::cont::CoordinateSystem coords("coords", point_handle);
data_set.AddCoordinateSystem(coords);
vtkm::cont::CellSetStructured<3> cell_set;
cell_set.SetPointDimensions(point_dims);
data_set.SetCellSet(cell_set);
int num_points = point_dims[0] * point_dims[1] * point_dims[2];
int num_cells = cell_dims[0] * cell_dims[1] * cell_dims[2];
data_set.AddField(CreatePointScalarField<vtkm::Float32>(point_handle, "point_data_Float32"));
data_set.AddField(CreatePointVecField<vtkm::Float32>(num_points, "vector_data_Float32"));
data_set.AddField(CreateCellScalarField<vtkm::Float32>(num_cells, "cell_data_Float32"));
data_set.AddField(CreatePointScalarField<vtkm::Float64>(point_handle, "point_data_Float64"));
data_set.AddField(CreatePointVecField<vtkm::Float64>(num_points, "vector_data_Float64"));
data_set.AddField(CreateCellScalarField<vtkm::Float64>(num_cells, "cell_data_Float64"));
data_set.AddField(CreateGhostScalarField(cell_dims));
return data_set;
}
vtkm::cont::DataSet CreateTestDataRectilinear(int block, int num_blocks, int base_size)
{
SpatialDivision mesh_size;
mesh_size.m_mins[0] = 0;
mesh_size.m_mins[1] = 0;
mesh_size.m_mins[2] = 0;
mesh_size.m_maxs[0] = num_blocks * base_size - 1;
mesh_size.m_maxs[1] = num_blocks * base_size - 1;
mesh_size.m_maxs[2] = num_blocks * base_size - 1;
SpatialDivision local_block = GetBlock(block, num_blocks, mesh_size);
vtkm::Vec<vtkm::Float32, 3> origin;
origin[0] = local_block.m_mins[0];
origin[1] = local_block.m_mins[1];
origin[2] = local_block.m_mins[2];
vtkm::Vec<vtkm::Float32, 3> spacing(1.f, 1.f, 1.f);
vtkm::Id3 point_dims;
point_dims[0] = local_block.m_maxs[0] - local_block.m_mins[0] + 2;
point_dims[1] = local_block.m_maxs[1] - local_block.m_mins[1] + 2;
point_dims[2] = local_block.m_maxs[2] - local_block.m_mins[2] + 2;
vtkm::Id3 cell_dims;
cell_dims[0] = point_dims[0] - 1;
cell_dims[1] = point_dims[1] - 1;
cell_dims[2] = point_dims[2] - 1;
std::vector<vtkm::Float64> xvals, yvals, zvals;
xvals.resize((size_t)point_dims[0]);
xvals[0] = static_cast<vtkm::Float64>(local_block.m_mins[0]);
for (size_t i = 1; i < (size_t)point_dims[0]; i++)
xvals[i] = xvals[i - 1] + spacing[0];
yvals.resize((size_t)point_dims[1]);
yvals[0] = static_cast<vtkm::Float64>(local_block.m_mins[1]);
for (size_t i = 1; i < (size_t)point_dims[1]; i++)
yvals[i] = yvals[i - 1] + spacing[1];
zvals.resize((size_t)point_dims[2]);
zvals[0] = static_cast<vtkm::Float64>(local_block.m_mins[2]);
for (size_t i = 1; i < (size_t)point_dims[2]; i++)
zvals[i] = zvals[i - 1] + spacing[2];
vtkm::cont::DataSetBuilderRectilinear dataSetBuilder;
vtkm::cont::DataSet data_set = dataSetBuilder.Create(xvals, yvals, zvals);
int num_points = point_dims[0] * point_dims[1] * point_dims[2];
data_set.AddField(CreatePointVecField<vtkm::Float32>(num_points, "vector_data_Float32"));
data_set.AddField(CreatePointVecField<vtkm::Float64>(num_points, "vector_data_Float64"));
return data_set;
}
vtkm::cont::DataSet CreateTestDataPoints(int num_points)
{
std::vector<double> x_vals;
std::vector<double> y_vals;
std::vector<double> z_vals;
std::vector<vtkm::UInt8> shapes;
std::vector<vtkm::IdComponent> num_indices;
std::vector<vtkm::Id> conn;
std::vector<double> field;
x_vals.resize(num_points);
y_vals.resize(num_points);
z_vals.resize(num_points);
shapes.resize(num_points);
conn.resize(num_points);
num_indices.resize(num_points);
field.resize(num_points);
std::linear_congruential_engine<std::uint_fast32_t, 48271, 0, 2147483647> rgen{ 0 };
std::uniform_real_distribution<double> dist{ -10., 10. };
for (int i = 0; i < num_points; ++i)
{
x_vals[i] = dist(rgen);
y_vals[i] = dist(rgen);
z_vals[i] = dist(rgen);
field[i] = dist(rgen);
shapes[i] = vtkm::CELL_SHAPE_VERTEX;
num_indices[i] = 1;
conn[i] = i;
}
vtkm::cont::DataSetBuilderExplicit dataSetBuilder;
vtkm::cont::DataSet data_set =
dataSetBuilder.Create(x_vals, y_vals, z_vals, shapes, num_indices, conn);
vtkm::cont::Field vfield = vtkm::cont::make_Field(
"point_data_Float64", vtkm::cont::Field::Association::Points, field, vtkm::CopyFlag::On);
data_set.AddField(vfield);
return data_set;
}
//-----------------------------------------------------------------------------
//Create VTK-m Data Sets
//-----------------------------------------------------------------------------
//Make a 2D uniform dataset.
inline vtkm::cont::DataSet Make2DUniformDataSet0()
{
vtkm::cont::DataSetBuilderUniform dsb;
constexpr vtkm::Id2 dimensions(3, 2);
vtkm::cont::DataSet dataSet = dsb.Create(dimensions);
constexpr vtkm::Id nVerts = 6;
constexpr vtkm::Float32 var[nVerts] = { 10.1f, 20.1f, 30.1f, 40.1f, 50.1f, 60.1f };
dataSet.AddPointField("pointvar", var, nVerts);
constexpr vtkm::Float32 cellvar[2] = { 100.1f, 200.1f };
dataSet.AddCellField("cellvar", cellvar, 2);
return dataSet;
}
//Make a 2D rectilinear dataset.
inline vtkm::cont::DataSet Make2DRectilinearDataSet0()
{
vtkm::cont::DataSetBuilderRectilinear dsb;
std::vector<vtkm::Float32> X(3), Y(2);
X[0] = 0.0f;
X[1] = 1.0f;
X[2] = 2.0f;
Y[0] = 0.0f;
Y[1] = 1.0f;
vtkm::cont::DataSet dataSet = dsb.Create(X, Y);
const vtkm::Id nVerts = 6;
vtkm::Float32 var[nVerts];
for (int i = 0; i < nVerts; i++)
var[i] = (vtkm::Float32)i;
dataSet.AddPointField("pointvar", var, nVerts);
const vtkm::Id nCells = 2;
vtkm::Float32 cellvar[nCells];
for (int i = 0; i < nCells; i++)
cellvar[i] = (vtkm::Float32)i;
dataSet.AddCellField("cellvar", cellvar, nCells);
return dataSet;
}
inline vtkm::cont::DataSet Make3DExplicitDataSet5()
{
vtkm::cont::DataSet dataSet;
const int nVerts = 11;
using CoordType = vtkm::Vec3f_32;
CoordType coordinates[nVerts] = {
CoordType(0, 0, 0), //0
CoordType(1, 0, 0), //1
CoordType(1, 0, 1), //2
CoordType(0, 0, 1), //3
CoordType(0, 1, 0), //4
CoordType(1, 1, 0), //5
CoordType(1, 1, 1), //6
CoordType(0, 1, 1), //7
CoordType(2, 0.5, 0.5), //8
CoordType(0, 2, 0), //9
CoordType(1, 2, 0) //10
};
vtkm::Float32 vars[nVerts] = { 10.1f, 20.1f, 30.2f, 40.2f, 50.3f, 60.2f,
70.2f, 80.3f, 90.f, 10.f, 11.f };
dataSet.AddCoordinateSystem(
vtkm::cont::make_CoordinateSystem("coordinates", coordinates, nVerts, vtkm::CopyFlag::On));
//Set point scalar
dataSet.AddField(make_Field(
"pointvar", vtkm::cont::Field::Association::Points, vars, nVerts, vtkm::CopyFlag::On));
//Set cell scalar
const int nCells = 4;
vtkm::Float32 cellvar[nCells] = { 100.1f, 110.f, 120.2f, 130.5f };
dataSet.AddField(make_Field(
"cellvar", vtkm::cont::Field::Association::Cells, cellvar, nCells, vtkm::CopyFlag::On));
vtkm::cont::CellSetExplicit<> cellSet;
vtkm::Vec<vtkm::Id, 8> ids;
cellSet.PrepareToAddCells(nCells, 23);
ids[0] = 0;
ids[1] = 1;
ids[2] = 5;
ids[3] = 4;
ids[4] = 3;
ids[5] = 2;
ids[6] = 6;
ids[7] = 7;
cellSet.AddCell(vtkm::CELL_SHAPE_HEXAHEDRON, 8, ids);
ids[0] = 1;
ids[1] = 5;
ids[2] = 6;
ids[3] = 2;
ids[4] = 8;
cellSet.AddCell(vtkm::CELL_SHAPE_PYRAMID, 5, ids);
ids[0] = 5;
ids[1] = 8;
ids[2] = 10;
ids[3] = 6;
cellSet.AddCell(vtkm::CELL_SHAPE_TETRA, 4, ids);
ids[0] = 4;
ids[1] = 7;
ids[2] = 9;
ids[3] = 5;
ids[4] = 6;
ids[5] = 10;
cellSet.AddCell(vtkm::CELL_SHAPE_WEDGE, 6, ids);
cellSet.CompleteAddingCells(nVerts);
//TODO: this needs to be a reference/shared_ptr style class
dataSet.SetCellSet(cellSet);
return dataSet;
}
inline vtkm::cont::DataSet Make3DUniformDataSet0()
{
vtkm::cont::DataSetBuilderUniform dsb;
constexpr vtkm::Id3 dimensions(3, 2, 3);
vtkm::cont::DataSet dataSet = dsb.Create(dimensions);
constexpr int nVerts = 18;
constexpr vtkm::Float32 vars[nVerts] = { 10.1f, 20.1f, 30.1f, 40.1f, 50.2f, 60.2f,
70.2f, 80.2f, 90.3f, 100.3f, 110.3f, 120.3f,
130.4f, 140.4f, 150.4f, 160.4f, 170.5f, 180.5f };
//Set point and cell scalar
dataSet.AddPointField("pointvar", vars, nVerts);
constexpr vtkm::Float32 cellvar[4] = { 100.1f, 100.2f, 100.3f, 100.4f };
dataSet.AddCellField("cellvar", cellvar, 4);
return dataSet;
}
inline vtkm::cont::DataSet Make3DExplicitDataSet2()
{
vtkm::cont::DataSet dataSet;
const int nVerts = 8;
using CoordType = vtkm::Vec3f_32;
CoordType coordinates[nVerts] = {
CoordType(0, 0, 0), // 0
CoordType(1, 0, 0), // 1
CoordType(1, 0, 1), // 2
CoordType(0, 0, 1), // 3
CoordType(0, 1, 0), // 4
CoordType(1, 1, 0), // 5
CoordType(1, 1, 1), // 6
CoordType(0, 1, 1) // 7
};
vtkm::Float32 vars[nVerts] = { 10.1f, 20.1f, 30.2f, 40.2f, 50.3f, 60.2f, 70.2f, 80.3f };
dataSet.AddCoordinateSystem(
vtkm::cont::make_CoordinateSystem("coordinates", coordinates, nVerts, vtkm::CopyFlag::On));
//Set point scalar
dataSet.AddField(make_Field(
"pointvar", vtkm::cont::Field::Association::Points, vars, nVerts, vtkm::CopyFlag::On));
//Set cell scalar
vtkm::Float32 cellvar[1] = { 100.1f };
dataSet.AddField(
make_Field("cellvar", vtkm::cont::Field::Association::Cells, cellvar, 1, vtkm::CopyFlag::On));
vtkm::cont::CellSetExplicit<> cellSet;
vtkm::Vec<vtkm::Id, 8> ids;
ids[0] = 0;
ids[1] = 1;
ids[2] = 2;
ids[3] = 3;
ids[4] = 4;
ids[5] = 5;
ids[6] = 6;
ids[7] = 7;
cellSet.PrepareToAddCells(1, 8);
cellSet.AddCell(vtkm::CELL_SHAPE_HEXAHEDRON, 8, ids);
cellSet.CompleteAddingCells(nVerts);
//TODO: this needs to be a reference/shared_ptr style class
dataSet.SetCellSet(cellSet);
return dataSet;
}
#if 0
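// Disabled: these helpers duplicate TestValue/SetPortal/CheckPortal from
// VTK-m's testing headers, which including translation units (such as
// UnitTestImageCompositing.cxx) are expected to pull in first; the copies
// below are kept for reference only.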
namespace detail
{
template <typename T>
struct TestValueImpl;
} //namespace detail
// Many tests involve getting and setting values in some index-based structure
// (like an array). These tests also often involve trying many types. The
// overloaded TestValue function returns some unique value for an index for a
// given type. Different types might give different values.
//
template <typename T>
static inline T TestValue(vtkm::Id index, T)
{
return detail::TestValueImpl<T>()(index);
}
namespace detail
{
template <typename T>
struct TestValueImpl
{
T DoIt(vtkm::Id index, vtkm::TypeTraitsIntegerTag) const
{
constexpr bool larger_than_2bytes = sizeof(T) > 2;
if (larger_than_2bytes)
{
return T(index * 100);
}
else
{
return T(index + 100);
}
}
T DoIt(vtkm::Id index, vtkm::TypeTraitsRealTag) const
{
return T(0.01f * static_cast<float>(index) + 1.001f);
}
T operator()(vtkm::Id index) const
{
return this->DoIt(index, typename vtkm::TypeTraits<T>::NumericTag());
}
};
template <typename T, vtkm::IdComponent N>
struct TestValueImpl<vtkm::Vec<T, N>>
{
vtkm::Vec<T, N> operator()(vtkm::Id index) const
{
vtkm::Vec<T, N> value;
for (vtkm::IdComponent i = 0; i < N; i++)
{
value[i] = TestValue(index * N + i, T());
}
return value;
}
};
template <typename U, typename V>
struct TestValueImpl<vtkm::Pair<U, V>>
{
vtkm::Pair<U, V> operator()(vtkm::Id index) const
{
return vtkm::Pair<U, V>(TestValue(2 * index, U()), TestValue(2 * index + 1, V()));
}
};
template <typename T, vtkm::IdComponent NumRow, vtkm::IdComponent NumCol>
struct TestValueImpl<vtkm::Matrix<T, NumRow, NumCol>>
{
vtkm::Matrix<T, NumRow, NumCol> operator()(vtkm::Id index) const
{
vtkm::Matrix<T, NumRow, NumCol> value;
vtkm::Id runningIndex = index * NumRow * NumCol;
for (vtkm::IdComponent row = 0; row < NumRow; ++row)
{
for (vtkm::IdComponent col = 0; col < NumCol; ++col)
{
value(row, col) = TestValue(runningIndex, T());
++runningIndex;
}
}
return value;
}
};
template <>
struct TestValueImpl<std::string>
{
std::string operator()(vtkm::Id index) const
{
std::stringstream stream;
stream << index;
return stream.str();
}
};
} //namespace detail
// Verifies that the contents of the given array portal match the values
// returned by vtkm::testing::TestValue.
template <typename PortalType>
static inline void CheckPortal(const PortalType& portal)
{
using ValueType = typename PortalType::ValueType;
for (vtkm::Id index = 0; index < portal.GetNumberOfValues(); index++)
{
ValueType expectedValue = TestValue(index, ValueType());
ValueType foundValue = portal.Get(index);
if (!test_equal(expectedValue, foundValue))
{
ASCENT_ERROR("Got unexpected value in array. Expected: " << expectedValue
<< ", Found: " << foundValue << "\n");
}
}
}
/// Sets all the values in a given array portal to be the values returned
/// by vtkm::testing::TestValue. The ArrayPortal must be allocated first.
template <typename PortalType>
static inline void SetPortal(const PortalType& portal)
{
using ValueType = typename PortalType::ValueType;
for (vtkm::Id index = 0; index < portal.GetNumberOfValues(); index++)
{
portal.Set(index, TestValue(index, ValueType()));
}
}
#endif
inline vtkm::cont::DataSet Make3DExplicitDataSetCowNose()
{
// prepare data array
const int nVerts = 17;
using CoordType = vtkm::Vec3f_64;
CoordType coordinates[nVerts] = {
CoordType(0.0480879, 0.151874, 0.107334), CoordType(0.0293568, 0.245532, 0.125337),
CoordType(0.0224398, 0.246495, 0.1351), CoordType(0.0180085, 0.20436, 0.145316),
CoordType(0.0307091, 0.152142, 0.0539249), CoordType(0.0270341, 0.242992, 0.107567),
CoordType(0.000684071, 0.00272505, 0.175648), CoordType(0.00946217, 0.077227, 0.187097),
CoordType(-0.000168991, 0.0692243, 0.200755), CoordType(-0.000129414, 0.00247137, 0.176561),
CoordType(0.0174172, 0.137124, 0.124553), CoordType(0.00325994, 0.0797155, 0.184912),
CoordType(0.00191765, 0.00589327, 0.16608), CoordType(0.0174716, 0.0501928, 0.0930275),
CoordType(0.0242103, 0.250062, 0.126256), CoordType(0.0108188, 0.152774, 0.167914),
CoordType(5.41687e-05, 0.00137834, 0.175119)
};
const int connectivitySize = 57;
vtkm::Id pointId[connectivitySize] = { 0, 1, 3, 2, 3, 1, 4, 5, 0, 1, 0, 5, 7, 8, 6,
9, 6, 8, 0, 10, 7, 11, 7, 10, 0, 6, 13, 12, 13, 6,
1, 5, 14, 1, 14, 2, 0, 3, 15, 0, 13, 4, 6, 16, 12,
6, 9, 16, 7, 11, 8, 0, 15, 10, 7, 6, 0 };
// create DataSet
vtkm::cont::DataSet dataSet;
dataSet.AddCoordinateSystem(
vtkm::cont::make_CoordinateSystem("coordinates", coordinates, nVerts, vtkm::CopyFlag::On));
vtkm::cont::ArrayHandle<vtkm::Id> connectivity;
connectivity.Allocate(connectivitySize);
for (vtkm::Id i = 0; i < connectivitySize; ++i)
{
connectivity.WritePortal().Set(i, pointId[i]);
}
vtkm::cont::CellSetSingleType<> cellSet;
cellSet.Fill(nVerts, vtkm::CELL_SHAPE_TRIANGLE, 3, connectivity);
dataSet.SetCellSet(cellSet);
std::vector<vtkm::Float32> pointvar(nVerts);
std::iota(pointvar.begin(), pointvar.end(), 15.f);
std::vector<vtkm::Float32> cellvar(connectivitySize / 3);
std::iota(cellvar.begin(), cellvar.end(), 132.f);
vtkm::cont::ArrayHandle<vtkm::Vec3f> pointvec;
pointvec.Allocate(nVerts);
SetPortal(pointvec.WritePortal());
vtkm::cont::ArrayHandle<vtkm::Vec3f> cellvec;
cellvec.Allocate(connectivitySize / 3);
SetPortal(cellvec.WritePortal());
dataSet.AddPointField("pointvar", pointvar);
dataSet.AddCellField("cellvar", cellvar);
dataSet.AddPointField("point_vectors", pointvec);
dataSet.AddCellField("cell_vectors", cellvec);
return dataSet;
}
inline vtkm::cont::DataSet Make3DRectilinearDataSet0()
{
vtkm::cont::DataSetBuilderRectilinear dsb;
std::vector<vtkm::Float32> X(3), Y(2), Z(3);
X[0] = 0.0f;
X[1] = 1.0f;
X[2] = 2.0f;
Y[0] = 0.0f;
Y[1] = 1.0f;
Z[0] = 0.0f;
Z[1] = 1.0f;
Z[2] = 2.0f;
vtkm::cont::DataSet dataSet = dsb.Create(X, Y, Z);
const vtkm::Id nVerts = 18;
vtkm::Float32 var[nVerts];
for (int i = 0; i < nVerts; i++)
var[i] = (vtkm::Float32)i;
dataSet.AddPointField("pointvar", var, nVerts);
const vtkm::Id nCells = 4;
vtkm::Float32 cellvar[nCells];
for (int i = 0; i < nCells; i++)
cellvar[i] = (vtkm::Float32)i;
dataSet.AddCellField("cellvar", cellvar, nCells);
return dataSet;
}
#endif //t_vtkm_test_utils_h