From 4522f918074c44e69376b7c4a7378c5f5abdce9c Mon Sep 17 00:00:00 2001 From: Dave Pugmire Date: Thu, 19 Jan 2023 15:20:27 -0500 Subject: [PATCH 01/11] Begin migrating vtkh image compositor over --- examples/demo/Demo.cxx | 7 +- vtkm/rendering/CMakeLists.txt | 15 + vtkm/rendering/compositing/Compositor.cxx | 208 +++++ vtkm/rendering/compositing/Compositor.h | 104 +++ .../compositing/DirectSendCompositor.cxx | 173 ++++ .../compositing/DirectSendCompositor.h | 24 + vtkm/rendering/compositing/Image.cxx | 35 + vtkm/rendering/compositing/Image.h | 333 ++++++++ vtkm/rendering/compositing/ImageCompositor.h | 222 +++++ vtkm/rendering/compositing/PNGEncoder.cxx | 246 ++++++ vtkm/rendering/compositing/PNGEncoder.h | 57 ++ .../compositing/PayloadCompositor.cxx | 62 ++ .../rendering/compositing/PayloadCompositor.h | 28 + vtkm/rendering/compositing/PayloadImage.cxx | 33 + vtkm/rendering/compositing/PayloadImage.h | 211 +++++ .../compositing/PayloadImageCompositor.h | 74 ++ .../compositing/RadixKCompositor.cxx | 208 +++++ vtkm/rendering/compositing/RadixKCompositor.h | 54 ++ vtkm/rendering/compositing/vtkm_diy_collect.h | 88 ++ .../compositing/vtkm_diy_image_block.h | 194 +++++ .../compositing/vtkm_diy_partial_blocks.h | 216 +++++ .../compositing/vtkm_diy_partial_collect.h | 185 +++++ .../vtkm_diy_partial_redistribute.h | 226 +++++ vtkm/rendering/compositing/vtkm_diy_utils.h | 49 ++ vtkm/rendering/testing/CMakeLists.txt | 13 + .../testing/UnitTestImageCompositing.cxx | 199 +++++ vtkm/rendering/testing/t_vtkm_test_utils.h | 773 ++++++++++++++++++ 27 files changed, 4036 insertions(+), 1 deletion(-) create mode 100644 vtkm/rendering/compositing/Compositor.cxx create mode 100644 vtkm/rendering/compositing/Compositor.h create mode 100644 vtkm/rendering/compositing/DirectSendCompositor.cxx create mode 100644 vtkm/rendering/compositing/DirectSendCompositor.h create mode 100644 vtkm/rendering/compositing/Image.cxx create mode 100644 vtkm/rendering/compositing/Image.h create mode 100644 
vtkm/rendering/compositing/ImageCompositor.h create mode 100644 vtkm/rendering/compositing/PNGEncoder.cxx create mode 100644 vtkm/rendering/compositing/PNGEncoder.h create mode 100644 vtkm/rendering/compositing/PayloadCompositor.cxx create mode 100644 vtkm/rendering/compositing/PayloadCompositor.h create mode 100644 vtkm/rendering/compositing/PayloadImage.cxx create mode 100644 vtkm/rendering/compositing/PayloadImage.h create mode 100644 vtkm/rendering/compositing/PayloadImageCompositor.h create mode 100644 vtkm/rendering/compositing/RadixKCompositor.cxx create mode 100644 vtkm/rendering/compositing/RadixKCompositor.h create mode 100644 vtkm/rendering/compositing/vtkm_diy_collect.h create mode 100644 vtkm/rendering/compositing/vtkm_diy_image_block.h create mode 100644 vtkm/rendering/compositing/vtkm_diy_partial_blocks.h create mode 100644 vtkm/rendering/compositing/vtkm_diy_partial_collect.h create mode 100644 vtkm/rendering/compositing/vtkm_diy_partial_redistribute.h create mode 100644 vtkm/rendering/compositing/vtkm_diy_utils.h create mode 100644 vtkm/rendering/testing/UnitTestImageCompositing.cxx create mode 100644 vtkm/rendering/testing/t_vtkm_test_utils.h diff --git a/examples/demo/Demo.cxx b/examples/demo/Demo.cxx index 08f32717d..5f763d6ff 100644 --- a/examples/demo/Demo.cxx +++ b/examples/demo/Demo.cxx @@ -8,6 +8,8 @@ // PURPOSE. See the above copyright notice for more information. 
//============================================================================ +#include + #include #include @@ -39,6 +41,9 @@ int main(int argc, char* argv[]) vtkm::cont::DataSet tangleData = tangle.Execute(); std::string fieldName = "tangle"; + vtkm::io::VTKDataSetWriter writer("tangle.vtk"); + writer.WriteDataSet(tangleData); + // Set up a camera for rendering the input data vtkm::rendering::Camera camera; camera.SetLookAt(vtkm::Vec3f_32(0.5, 0.5, 0.5)); @@ -57,7 +62,7 @@ int main(int argc, char* argv[]) vtkm::rendering::Scene scene; scene.AddActor(actor); // 2048x2048 pixels in the canvas: - CanvasRayTracer canvas(2048, 2048); + CanvasRayTracer canvas(512, 512); // Create a view and use it to render the input data using OS Mesa vtkm::rendering::View3D view(scene, MapperVolume(), canvas, camera, bg); diff --git a/vtkm/rendering/CMakeLists.txt b/vtkm/rendering/CMakeLists.txt index e19985206..8389c2caf 100644 --- a/vtkm/rendering/CMakeLists.txt +++ b/vtkm/rendering/CMakeLists.txt @@ -55,6 +55,10 @@ set(headers View3D.h Wireframer.h WorldAnnotator.h + + compositing/Compositor.h + compositing/Image.h + compositing/PNGEncoder.h ) set(sources @@ -86,6 +90,13 @@ set(sources raytracing/Logger.cxx raytracing/MeshConnectivityContainers.cxx raytracing/TriangleExtractor.cxx + + compositing/Compositor.cxx + compositing/Image.cxx + compositing/PNGEncoder.cxx + compositing/RadixKCompositor.cxx + compositing/PayloadCompositor.cxx + compositing/PayloadImage.cxx ) # This list of sources has code that uses devices and so might need to be @@ -147,6 +158,10 @@ if(UNIX AND NOT APPLE) target_link_libraries(vtkm_rendering PRIVATE rt) endif() +if (VTKm_ENABLE_MPI) + target_link_libraries(vtkm_rendering PUBLIC MPI::MPI_CXX) +endif() + #----------------------------------------------------------------------------- add_subdirectory(internal) add_subdirectory(raytracing) diff --git a/vtkm/rendering/compositing/Compositor.cxx b/vtkm/rendering/compositing/Compositor.cxx new file mode 100644 
index 000000000..3fd314950 --- /dev/null +++ b/vtkm/rendering/compositing/Compositor.cxx @@ -0,0 +1,208 @@ +//============================================================================ +// Copyright (c) Kitware, Inc. +// All rights reserved. +// See LICENSE.txt for details. +// +// This software is distributed WITHOUT ANY WARRANTY; without even +// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR +// PURPOSE. See the above copyright notice for more information. +//============================================================================ + +#include +#include + +#include +#include +#include + +#ifdef VTKM_ENABLE_MPI +#include +#include +//#include +#include +#endif + +namespace vtkm +{ +namespace rendering +{ +namespace compositing +{ + +Compositor::Compositor() + : m_composite_mode(Z_BUFFER_SURFACE) +{ +} + +Compositor::~Compositor() {} + +void Compositor::SetCompositeMode(CompositeMode composite_mode) +{ + // assure we don't have mixed image types + assert(m_images.size() == 0); + m_composite_mode = composite_mode; +} + +void Compositor::ClearImages() +{ + m_images.clear(); +} + +void Compositor::AddImage(const unsigned char* color_buffer, + const float* depth_buffer, + const int width, + const int height) +{ + assert(m_composite_mode != VIS_ORDER_BLEND); + assert(depth_buffer != NULL); + Image image; + if (m_images.size() == 0) + { + m_images.push_back(image); + m_images[0].Init(color_buffer, depth_buffer, width, height); + //m_images[0].Save("first.png"); + } + else if (m_composite_mode == Z_BUFFER_SURFACE) + { + // + // Do local composite and keep a single image + // + image.Init(color_buffer, depth_buffer, width, height); + vtkm::rendering::compositing::ImageCompositor compositor; + compositor.ZBufferComposite(m_images[0], image); + } + else + { + const size_t image_index = m_images.size(); + m_images.push_back(image); + m_images[image_index].Init(color_buffer, depth_buffer, width, height); + } +} + +void Compositor::AddImage(const 
float* color_buffer, + const float* depth_buffer, + const int width, + const int height) +{ + assert(m_composite_mode != VIS_ORDER_BLEND); + assert(depth_buffer != NULL); + Image image; + if (m_images.size() == 0) + { + m_images.push_back(image); + m_images[0].Init(color_buffer, depth_buffer, width, height); + } + else if (m_composite_mode == Z_BUFFER_SURFACE) + { + // + // Do local composite and keep a single image + // + image.Init(color_buffer, depth_buffer, width, height); + + vtkm::rendering::compositing::ImageCompositor compositor; + compositor.ZBufferComposite(m_images[0], image); + } + else + { + const size_t image_index = m_images.size(); + m_images.push_back(image); + m_images[image_index].Init(color_buffer, depth_buffer, width, height); + } +} + +void Compositor::AddImage(const unsigned char* color_buffer, + const float* depth_buffer, + const int width, + const int height, + const int vis_order) +{ + assert(m_composite_mode == VIS_ORDER_BLEND); + Image image; + const size_t image_index = m_images.size(); + m_images.push_back(image); + m_images[image_index].Init(color_buffer, depth_buffer, width, height, vis_order); +} + +void Compositor::AddImage(const float* color_buffer, + const float* depth_buffer, + const int width, + const int height, + const int vis_order) +{ + assert(m_composite_mode == VIS_ORDER_BLEND); + Image image; + const size_t image_index = m_images.size(); + m_images.push_back(image); + + m_images[image_index].Init(color_buffer, depth_buffer, width, height, vis_order); +} + +Image Compositor::Composite() +{ + assert(m_images.size() != 0); + + if (m_composite_mode == Z_BUFFER_SURFACE) + { + CompositeZBufferSurface(); + } + else if (m_composite_mode == Z_BUFFER_BLEND) + { + CompositeZBufferBlend(); + } + else if (m_composite_mode == VIS_ORDER_BLEND) + { + CompositeVisOrder(); + } + // Make this a param to avoid the copy? 
+ return m_images[0]; +} + +void Compositor::Cleanup() {} + +std::string Compositor::GetLogString() +{ + std::string res = m_log_stream.str(); + m_log_stream.str(""); + return res; +} + +void Compositor::CompositeZBufferSurface() +{ + // nothing to do here in serial. Images were composited as + // they were added to the compositor +#ifdef VTKM_ENABLE_MPI + auto comm = vtkm::cont::EnvironmentTracker::GetCommunicator(); + + assert(m_images.size() == 1); + RadixKCompositor compositor; + compositor.CompositeSurface(comm, this->m_images[0]); + m_log_stream << compositor.GetTimingString(); +#endif +} + +void Compositor::CompositeZBufferBlend() +{ + throw vtkm::cont::ErrorBadValue("Not implemented"); +} + +void Compositor::CompositeVisOrder() +{ + +#ifdef VTKM_ENABLE_MPI + /* + vtkhdiy::mpi::communicator diy_comm; + diy_comm = vtkhdiy::mpi::communicator(MPI_Comm_f2c(GetMPICommHandle())); + + assert(m_images.size() != 0); + DirectSendCompositor compositor; + compositor.CompositeVolume(diy_comm, this->m_images); + */ +#else + vtkm::rendering::compositing::ImageCompositor compositor; + compositor.OrderedComposite(m_images); +#endif +} + +} +} +} //namespace vtkm::rendering::compositing diff --git a/vtkm/rendering/compositing/Compositor.h b/vtkm/rendering/compositing/Compositor.h new file mode 100644 index 000000000..6db06babc --- /dev/null +++ b/vtkm/rendering/compositing/Compositor.h @@ -0,0 +1,104 @@ +//============================================================================ +// Copyright (c) Kitware, Inc. +// All rights reserved. +// See LICENSE.txt for details. +// +// This software is distributed WITHOUT ANY WARRANTY; without even +// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR +// PURPOSE. See the above copyright notice for more information. 
+//============================================================================ + +#ifndef vtk_m_rendering_compositing_Compositor_h +#define vtk_m_rendering_compositing_Compositor_h + +#include + +#include + +#ifdef VTKM_ENABLE_MPI +#include +#endif + +namespace vtkm +{ +namespace rendering +{ +namespace compositing +{ + +class VTKM_RENDERING_EXPORT Compositor +{ +public: + enum CompositeMode + { + Z_BUFFER_SURFACE, // zbuffer composite no transparency + Z_BUFFER_BLEND, // zbuffer composite with transparency + VIS_ORDER_BLEND // blend images in a specific order + }; + Compositor(); + + virtual ~Compositor(); + + void SetCompositeMode(CompositeMode composite_mode); + + void ClearImages(); + + void AddImage(const unsigned char* color_buffer, + const float* depth_buffer, + const int width, + const int height); + + void AddImage(const float* color_buffer, + const float* depth_buffer, + const int width, + const int height); + + void AddImage(const unsigned char* color_buffer, + const float* depth_buffer, + const int width, + const int height, + const int vis_order); + + void AddImage(const float* color_buffer, + const float* depth_buffer, + const int width, + const int height, + const int vis_order); + + Image Composite(); + + virtual void Cleanup(); + + std::string GetLogString(); + + unsigned char* ConvertBuffer(const float* buffer, const int size) + { + unsigned char* ubytes = new unsigned char[size]; + +#ifdef VTKH_OPENMP_ENABLED +#pragma omp parallel for +#endif + for (int i = 0; i < size; ++i) + { + ubytes[i] = static_cast(buffer[i] * 255.f); + } + + return ubytes; + } + +protected: + virtual void CompositeZBufferSurface(); + virtual void CompositeZBufferBlend(); + virtual void CompositeVisOrder(); + + std::stringstream m_log_stream; + CompositeMode m_composite_mode; + std::vector m_images; +}; + +} +} +} //namespace vtkm::rendering::compositing + + +#endif //vtk_m_rendering_compositing_Compositor_h diff --git a/vtkm/rendering/compositing/DirectSendCompositor.cxx 
b/vtkm/rendering/compositing/DirectSendCompositor.cxx new file mode 100644 index 000000000..56d5e70da --- /dev/null +++ b/vtkm/rendering/compositing/DirectSendCompositor.cxx @@ -0,0 +1,173 @@ +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +namespace vtkh +{ + +struct Redistribute +{ + typedef vtkhdiy::RegularDecomposer Decomposer; + const vtkhdiy::RegularDecomposer& m_decomposer; + Redistribute(const Decomposer& decomposer) + : m_decomposer(decomposer) + { + } + + void operator()(void* v_block, const vtkhdiy::ReduceProxy& proxy) const + { + MultiImageBlock* block = static_cast(v_block); + // + // first round we have no incoming. Take the image we have, + // chop it up into pieces, and send it to the domain resposible + // for that portion + // + const int world_size = m_decomposer.nblocks; + const int local_images = block->m_images.size(); + if (proxy.in_link().size() == 0) + { + std::map> outgoing; + + for (int i = 0; i < world_size; ++i) + { + vtkhdiy::DiscreteBounds sub_image_bounds; + m_decomposer.fill_bounds(sub_image_bounds, i); + vtkm::Bounds vtkm_sub_bounds = DIYBoundsToVTKM(sub_image_bounds); + + vtkhdiy::BlockID dest = proxy.out_link().target(i); + outgoing[dest].resize(local_images); + + for (int img = 0; img < local_images; ++img) + { + outgoing[dest][img].SubsetFrom(block->m_images[img], vtkm_sub_bounds); + } + } //for + + typename std::map>::iterator it; + for (it = outgoing.begin(); it != outgoing.end(); ++it) + { + proxy.enqueue(it->first, it->second); + } + } // if + else if (block->m_images.at(0).m_composite_order != -1) + { + // blend images according to vis order + std::vector images; + for (int i = 0; i < proxy.in_link().size(); ++i) + { + + std::vector incoming; + int gid = proxy.in_link().target(i).gid; + proxy.dequeue(gid, incoming); + const int in_size = incoming.size(); + for (int img = 0; img < in_size; ++img) + { + images.emplace_back(incoming[img]); + //std::cout<<"rank 
"<m_output.Swap(images[0]); + } // else if + else if (block->m_images.at(0).m_composite_order == -1 && + block->m_images.at(0).HasTransparency()) + { + std::vector images; + for (int i = 0; i < proxy.in_link().size(); ++i) + { + + std::vector incoming; + int gid = proxy.in_link().target(i).gid; + proxy.dequeue(gid, incoming); + const int in_size = incoming.size(); + for (int img = 0; img < in_size; ++img) + { + images.emplace_back(incoming[img]); + //std::cout<<"rank "<& images) +{ + vtkhdiy::DiscreteBounds global_bounds = VTKMBoundsToDIY(images.at(0).m_orig_bounds); + + const int num_threads = 1; + const int num_blocks = diy_comm.size(); + const int magic_k = 8; + Image sub_image; + // + // DIY does not seem to like being called with different block types + // so we isolate them within separate blocks + // + { + vtkhdiy::Master master(diy_comm, num_threads, -1, 0, [](void* b) { + ImageBlock* block = reinterpret_cast*>(b); + delete block; + }); + + // create an assigner with one block per rank + vtkhdiy::ContiguousAssigner assigner(num_blocks, num_blocks); + + AddMultiImageBlock create(master, images, sub_image); + + const int dims = 2; + vtkhdiy::RegularDecomposer decomposer(dims, global_bounds, num_blocks); + decomposer.decompose(diy_comm.rank(), assigner, create); + + vtkhdiy::all_to_all(master, assigner, Redistribute(decomposer), magic_k); + } + + { + vtkhdiy::Master master(diy_comm, num_threads, -1, 0, [](void* b) { + ImageBlock* block = reinterpret_cast*>(b); + delete block; + }); + vtkhdiy::ContiguousAssigner assigner(num_blocks, num_blocks); + + const int dims = 2; + vtkhdiy::RegularDecomposer decomposer(dims, global_bounds, num_blocks); + AddImageBlock all_create(master, sub_image); + decomposer.decompose(diy_comm.rank(), assigner, all_create); + MPI_Barrier(diy_comm); + + //MPICollect(sub_image,diy_comm); + vtkhdiy::all_to_all(master, assigner, CollectImages(decomposer), magic_k); + } + + images.at(0).Swap(sub_image); +} + +std::string 
DirectSendCompositor::GetTimingString() +{ + std::string res(m_timing_log.str()); + m_timing_log.str(""); + return res; +} + +} diff --git a/vtkm/rendering/compositing/DirectSendCompositor.h b/vtkm/rendering/compositing/DirectSendCompositor.h new file mode 100644 index 000000000..2372ff42f --- /dev/null +++ b/vtkm/rendering/compositing/DirectSendCompositor.h @@ -0,0 +1,24 @@ +#ifndef VTKH_DIY_DIRECT_SEND_HPP +#define VTKH_DIY_DIRECT_SEND_HPP + +#include +#include +#include + +namespace vtkh +{ + +class DirectSendCompositor +{ +public: + DirectSendCompositor(); + ~DirectSendCompositor(); + void CompositeVolume(vtkhdiy::mpi::communicator& diy_comm, std::vector& images); + std::string GetTimingString(); + +private: + std::stringstream m_timing_log; +}; + +} // namespace vtkh +#endif diff --git a/vtkm/rendering/compositing/Image.cxx b/vtkm/rendering/compositing/Image.cxx new file mode 100644 index 000000000..8e1c838b5 --- /dev/null +++ b/vtkm/rendering/compositing/Image.cxx @@ -0,0 +1,35 @@ +// See License.txt + +#include +#include + +namespace vtkm +{ +namespace rendering +{ +namespace compositing +{ + +void Image::Save(const std::string& name, const std::vector& comments) +{ + PNGEncoder encoder; + encoder.Encode(&m_pixels[0], + m_bounds.X.Max - m_bounds.X.Min + 1, + m_bounds.Y.Max - m_bounds.Y.Min + 1, + comments); + encoder.Save(name); +} + +void Image::Save(const std::string& name, const std::vector& comments) const +{ + PNGEncoder encoder; + encoder.Encode(&m_pixels[0], + m_bounds.X.Max - m_bounds.X.Min + 1, + m_bounds.Y.Max - m_bounds.Y.Min + 1, + comments); + encoder.Save(name); +} + +} +} +} //namespace vtkm::rendering::compositing diff --git a/vtkm/rendering/compositing/Image.h b/vtkm/rendering/compositing/Image.h new file mode 100644 index 000000000..2ce6b8e20 --- /dev/null +++ b/vtkm/rendering/compositing/Image.h @@ -0,0 +1,333 @@ +//============================================================================ +// Copyright (c) Kitware, Inc. 
+// All rights reserved. +// See LICENSE.txt for details. +// +// This software is distributed WITHOUT ANY WARRANTY; without even +// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR +// PURPOSE. See the above copyright notice for more information. +//============================================================================ + +#ifndef vtk_m_rendering_compositing_Image_h +#define vtk_m_rendering_compositing_Image_h + +#include + +#include +#include +#include + +namespace vtkm +{ +namespace rendering +{ +namespace compositing +{ + +struct VTKM_RENDERING_EXPORT Image +{ + // The image bounds are indicated by a grid starting at + // 1-width and 1-height. Actual width would be calculated + // m_bounds.X.Max - m_bounds.X.Min + 1 + // 1024 - 1 + 1 = 1024 + vtkm::Bounds m_orig_bounds; + vtkm::Bounds m_bounds; + std::vector m_pixels; + std::vector m_depths; + int m_orig_rank; + bool m_has_transparency; + int m_composite_order; + + Image() + : m_orig_rank(-1) + , m_has_transparency(false) + , m_composite_order(-1) + { + } + + + Image(const vtkm::Bounds& bounds) + : m_orig_bounds(bounds) + , m_bounds(bounds) + , m_orig_rank(-1) + , m_has_transparency(false) + , m_composite_order(-1) + + { + const int dx = bounds.X.Max - bounds.X.Min + 1; + const int dy = bounds.Y.Max - bounds.Y.Min + 1; + m_pixels.resize(dx * dy * 4); + m_depths.resize(dx * dy); + } + + // init this image based on the original bounds + // of the other image + void InitOriginal(const Image& other) + { + m_orig_bounds = other.m_orig_bounds; + m_bounds = other.m_orig_bounds; + + const int dx = m_bounds.X.Max - m_bounds.X.Min + 1; + const int dy = m_bounds.Y.Max - m_bounds.Y.Min + 1; + m_pixels.resize(dx * dy * 4); + m_depths.resize(dx * dy); + + m_orig_rank = -1; + m_has_transparency = false; + m_composite_order = -1; + } + + int GetNumberOfPixels() const { return static_cast(m_pixels.size() / 4); } + + void SetHasTransparency(bool has_transparency) { m_has_transparency = has_transparency; 
} + + bool HasTransparency() { return m_has_transparency; } + + void Init(const float* color_buffer, + const float* depth_buffer, + int width, + int height, + int composite_order = -1) + { + m_composite_order = composite_order; + m_bounds.X.Min = 1; + m_bounds.Y.Min = 1; + m_bounds.X.Max = width; + m_bounds.Y.Max = height; + m_orig_bounds = m_bounds; + const int size = width * height; + m_pixels.resize(size * 4); + m_depths.resize(size); + +#ifdef VTKH_OPENMP_ENABLED +#pragma omp parallel for +#endif + for (int i = 0; i < size; ++i) + { + const int offset = i * 4; + m_pixels[offset + 0] = static_cast(color_buffer[offset + 0] * 255.f); + m_pixels[offset + 1] = static_cast(color_buffer[offset + 1] * 255.f); + m_pixels[offset + 2] = static_cast(color_buffer[offset + 2] * 255.f); + m_pixels[offset + 3] = static_cast(color_buffer[offset + 3] * 255.f); + float depth = depth_buffer[i]; + //make sure we can do a single comparison on depth + //deal with negative depth values + //TODO: This may not be the best way + depth = depth < 0 ? abs(depth) : depth; + m_depths[i] = depth; + } + } + + void Init(const unsigned char* color_buffer, + const float* depth_buffer, + int width, + int height, + int composite_order = -1) + { + m_composite_order = composite_order; + m_bounds.X.Min = 1; + m_bounds.Y.Min = 1; + m_bounds.X.Max = width; + m_bounds.Y.Max = height; + m_orig_bounds = m_bounds; + + const int size = width * height; + m_pixels.resize(size * 4); + m_depths.resize(size); + + std::copy(color_buffer, color_buffer + size * 4, &m_pixels[0]); + +#ifdef VTKH_OPENMP_ENABLED +#pragma omp parallel for +#endif + for (int i = 0; i < size; ++i) + { + float depth = depth_buffer[i]; + //make sure we can do a single comparison on depth + depth = depth < 0 ? 
2.f : depth; + m_depths[i] = depth; + } // for + } + + + void CompositeBackground(const float* color) + { + + const int size = static_cast(m_pixels.size() / 4); + unsigned char bg_color[4]; + for (int i = 0; i < 4; ++i) + { + bg_color[i] = static_cast(color[i] * 255.f); + } + +#ifdef VTKH_OPENMP_ENABLED +#pragma omp parallel for +#endif + for (int i = 0; i < size; ++i) + { + const int offset = i * 4; + unsigned int alpha = static_cast(m_pixels[offset + 3]); + const float opacity = (255 - alpha); + m_pixels[offset + 0] += static_cast(opacity * bg_color[0] / 255); + m_pixels[offset + 1] += static_cast(opacity * bg_color[1] / 255); + m_pixels[offset + 2] += static_cast(opacity * bg_color[2] / 255); + m_pixels[offset + 3] += static_cast(opacity * bg_color[3] / 255); + } + } + // + // Fill this image with a sub-region of another image + // + void SubsetFrom(const Image& image, const vtkm::Bounds& sub_region) + { + m_orig_bounds = image.m_orig_bounds; + m_bounds = sub_region; + m_orig_rank = image.m_orig_rank; + m_composite_order = image.m_composite_order; + + assert(sub_region.X.Min >= image.m_bounds.X.Min); + assert(sub_region.Y.Min >= image.m_bounds.Y.Min); + assert(sub_region.X.Max <= image.m_bounds.X.Max); + assert(sub_region.Y.Max <= image.m_bounds.Y.Max); + + const int s_dx = m_bounds.X.Max - m_bounds.X.Min + 1; + const int s_dy = m_bounds.Y.Max - m_bounds.Y.Min + 1; + + const int dx = image.m_bounds.X.Max - image.m_bounds.X.Min + 1; + //const int dy = image.m_bounds.Y.Max - image.m_bounds.Y.Min + 1; + + const int start_x = m_bounds.X.Min - image.m_bounds.X.Min; + const int start_y = m_bounds.Y.Min - image.m_bounds.Y.Min; + const int end_y = start_y + s_dy; + + m_pixels.resize(s_dx * s_dy * 4); + m_depths.resize(s_dx * s_dy); + + + +#ifdef VTKH_OPENMP_ENABLED +#pragma omp parallel for +#endif + for (int y = start_y; y < end_y; ++y) + { + const int copy_to = (y - start_y) * s_dx; + const int copy_from = y * dx + start_x; + + std::copy(&image.m_pixels[copy_from * 
4], + &image.m_pixels[copy_from * 4] + s_dx * 4, + &m_pixels[copy_to * 4]); + std::copy(&image.m_depths[copy_from], &image.m_depths[copy_from] + s_dx, &m_depths[copy_to]); + } + } + + void Color(int color) + { + unsigned char c[4]; + c[3] = 255; + + c[0] = 0; + c[1] = 0; + c[2] = 0; + int index = color % 3; + c[index] = 255 - color * 11; + ; + const int size = static_cast(m_pixels.size()); + for (int i = 0; i < size; ++i) + { + float d = m_depths[i / 4]; + if (d > 0 && d < 1) + { + m_pixels[i] = c[i % 4]; + } + else + { + m_pixels[i] = 155; + } + } + } + // + // Fills the passed in image with the contents of this image + // + void SubsetTo(Image& image) const + { + image.m_composite_order = m_composite_order; + assert(m_bounds.X.Min >= image.m_bounds.X.Min); + assert(m_bounds.Y.Min >= image.m_bounds.Y.Min); + assert(m_bounds.X.Max <= image.m_bounds.X.Max); + assert(m_bounds.Y.Max <= image.m_bounds.Y.Max); + + const int s_dx = m_bounds.X.Max - m_bounds.X.Min + 1; + const int s_dy = m_bounds.Y.Max - m_bounds.Y.Min + 1; + + const int dx = image.m_bounds.X.Max - image.m_bounds.X.Min + 1; + //const int dy = image.m_bounds.Y.Max - image.m_bounds.Y.Min + 1; + + const int start_x = m_bounds.X.Min - image.m_bounds.X.Min; + const int start_y = m_bounds.Y.Min - image.m_bounds.Y.Min; + +#ifdef VTKH_OPENMP_ENABLED +#pragma omp parallel for +#endif + for (int y = 0; y < s_dy; ++y) + { + const int copy_to = (y + start_y) * dx + start_x; + const int copy_from = y * s_dx; + + std::copy(&m_pixels[copy_from * 4], + &m_pixels[copy_from * 4] + s_dx * 4, + &image.m_pixels[copy_to * 4]); + + std::copy(&m_depths[copy_from], &m_depths[copy_from] + s_dx, &image.m_depths[copy_to]); + } + } + + void Swap(Image& other) + { + vtkm::Bounds orig = m_orig_bounds; + vtkm::Bounds bounds = m_bounds; + + m_orig_bounds = other.m_orig_bounds; + m_bounds = other.m_bounds; + + other.m_orig_bounds = orig; + other.m_bounds = bounds; + + m_pixels.swap(other.m_pixels); + m_depths.swap(other.m_depths); + } + + 
void Clear() + { + vtkm::Bounds empty; + m_orig_bounds = empty; + m_bounds = empty; + m_pixels.clear(); + m_depths.clear(); + } + + std::string ToString() const + { + std::stringstream ss; + ss << "Total size pixels " << (int)m_pixels.size() / 4; + ss << " tile dims: {" << m_bounds.X.Min << "," << m_bounds.Y.Min << "} - "; + ss << "{" << m_bounds.X.Max << "," << m_bounds.Y.Max << "}\n"; + ; + return ss.str(); + } + + void Save(const std::string& name, const std::vector& comments) const; + void Save(const std::string& name, const std::vector& comments); +}; + +struct CompositeOrderSort +{ + inline bool operator()(const Image& lhs, const Image& rhs) const + { + return lhs.m_composite_order < rhs.m_composite_order; + } +}; + +} +} +} //namespace vtkm::rendering::compositing + +#endif //vtk_m_rendering_compositing_Image_h diff --git a/vtkm/rendering/compositing/ImageCompositor.h b/vtkm/rendering/compositing/ImageCompositor.h new file mode 100644 index 000000000..40fdf176c --- /dev/null +++ b/vtkm/rendering/compositing/ImageCompositor.h @@ -0,0 +1,222 @@ +//============================================================================ +// Copyright (c) Kitware, Inc. +// All rights reserved. +// See LICENSE.txt for details. +// +// This software is distributed WITHOUT ANY WARRANTY; without even +// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR +// PURPOSE. See the above copyright notice for more information. 
+//============================================================================ + +#ifndef vtk_m_rendering_compositing_ImageCompositor_h +#define vtk_m_rendering_compositing_ImageCompositor_h + +#include + +#include +#include + +namespace vtkm +{ +namespace rendering +{ +namespace compositing +{ + +class VTKM_RENDERING_EXPORT ImageCompositor +{ +public: + void Blend(vtkm::rendering::compositing::Image& front, vtkm::rendering::compositing::Image& back) + { + + assert(front.m_bounds.X.Min == back.m_bounds.X.Min); + assert(front.m_bounds.Y.Min == back.m_bounds.Y.Min); + assert(front.m_bounds.X.Max == back.m_bounds.X.Max); + assert(front.m_bounds.Y.Max == back.m_bounds.Y.Max); + const int size = static_cast(front.m_pixels.size() / 4); + +#ifdef VTKH_OPENMP_ENABLED +#pragma omp parallel for +#endif + for (int i = 0; i < size; ++i) + { + const int offset = i * 4; + unsigned int alpha = front.m_pixels[offset + 3]; + const unsigned int opacity = 255 - alpha; + + front.m_pixels[offset + 0] += + static_cast(opacity * back.m_pixels[offset + 0] / 255); + front.m_pixels[offset + 1] += + static_cast(opacity * back.m_pixels[offset + 1] / 255); + front.m_pixels[offset + 2] += + static_cast(opacity * back.m_pixels[offset + 2] / 255); + front.m_pixels[offset + 3] += + static_cast(opacity * back.m_pixels[offset + 3] / 255); + + float d1 = std::min(front.m_depths[i], 1.001f); + float d2 = std::min(back.m_depths[i], 1.001f); + float depth = std::min(d1, d2); + front.m_depths[i] = depth; + } + } + + void ZBufferComposite(vtkm::rendering::compositing::Image& front, + const vtkm::rendering::compositing::Image& image) + { + assert(front.m_depths.size() == front.m_pixels.size() / 4); + assert(front.m_bounds.X.Min == image.m_bounds.X.Min); + assert(front.m_bounds.Y.Min == image.m_bounds.Y.Min); + assert(front.m_bounds.X.Max == image.m_bounds.X.Max); + assert(front.m_bounds.Y.Max == image.m_bounds.Y.Max); + + const int size = static_cast(front.m_depths.size()); + +#ifdef VTKH_OPENMP_ENABLED 
+#pragma omp parallel for +#endif + for (int i = 0; i < size; ++i) + { + const float depth = image.m_depths[i]; + if (depth > 1.f || front.m_depths[i] < depth) + { + continue; + } + const int offset = i * 4; + front.m_depths[i] = abs(depth); + front.m_pixels[offset + 0] = image.m_pixels[offset + 0]; + front.m_pixels[offset + 1] = image.m_pixels[offset + 1]; + front.m_pixels[offset + 2] = image.m_pixels[offset + 2]; + front.m_pixels[offset + 3] = image.m_pixels[offset + 3]; + } + } + + void OrderedComposite(std::vector& images) + { + const int total_images = images.size(); + std::sort(images.begin(), images.end(), CompositeOrderSort()); + for (int i = 1; i < total_images; ++i) + { + Blend(images[0], images[i]); + } + } + + void ZBufferComposite(std::vector& images) + { + const int total_images = images.size(); + for (int i = 1; i < total_images; ++i) + { + ZBufferComposite(images[0], images[i]); + } + } + + struct Pixel + { + unsigned char m_color[4]; + float m_depth; + int m_pixel_id; // local (sub-image) pixels id + + bool operator<(const Pixel& other) const + { + if (m_pixel_id != other.m_pixel_id) + { + return m_pixel_id < other.m_pixel_id; + } + else + { + return m_depth < other.m_depth; + } + } + }; + + void CombineImages(const std::vector& images, + std::vector& pixels) + { + + const int num_images = static_cast(images.size()); + for (int i = 0; i < num_images; ++i) + { + // + // Extract the partial composites into a contiguous array + // + + const int image_size = images[i].GetNumberOfPixels(); + const int offset = i * image_size; +#ifdef VTKH_OPENMP_ENABLED +#pragma omp parallel for +#endif + for (int j = 0; j < image_size; ++j) + { + const int image_offset = j * 4; + pixels[offset + j].m_color[0] = images[i].m_pixels[image_offset + 0]; + pixels[offset + j].m_color[1] = images[i].m_pixels[image_offset + 1]; + pixels[offset + j].m_color[2] = images[i].m_pixels[image_offset + 2]; + pixels[offset + j].m_color[3] = images[i].m_pixels[image_offset + 3]; + 
pixels[offset + j].m_depth = images[i].m_depths[j]; + pixels[offset + j].m_pixel_id = j; + } // for pixels + } // for images + } + + void ZBufferBlend(std::vector& images) + { + const int image_pixels = images[0].GetNumberOfPixels(); + const int num_images = static_cast(images.size()); + std::vector pixels; + CombineImages(images, pixels); +#ifdef VTKH_OPENMP_ENABLED +#pragma omp parallel for +#endif + for (int i = 0; i < image_pixels; ++i) + { + const int begin = image_pixels * i; + const int end = image_pixels * i - 1; + std::sort(pixels.begin() + begin, pixels.begin() + end); + } + + // check to see if that worked + int pixel_id_0 = pixels[0].m_pixel_id; + for (int i = 1; i < num_images; ++i) + { + assert(pixel_id_0 == pixels[i].m_pixel_id); + } + + +#ifdef VTKH_OPENMP_ENABLED +#pragma omp parallel for +#endif + for (int i = 0; i < image_pixels; ++i) + { + const int index = i * num_images; + Pixel pixel = pixels[index]; + for (int j = 1; j < num_images; ++j) + { + if (pixel.m_color[3] == 255 || pixel.m_depth > 1.f) + { + break; + } + unsigned int alpha = pixel.m_color[3]; + const unsigned int opacity = 255 - alpha; + pixel.m_color[0] += + static_cast(opacity * pixels[index + j].m_color[0] / 255); + pixel.m_color[1] += + static_cast(opacity * pixels[index + j].m_color[1] / 255); + pixel.m_color[2] += + static_cast(opacity * pixels[index + j].m_color[2] / 255); + pixel.m_color[3] += + static_cast(opacity * pixels[index + j].m_color[3] / 255); + pixel.m_depth = pixels[index + j].m_depth; + } // for each image + images[0].m_pixels[i * 4 + 0] = pixel.m_color[0]; + images[0].m_pixels[i * 4 + 1] = pixel.m_color[1]; + images[0].m_pixels[i * 4 + 2] = pixel.m_color[2]; + images[0].m_pixels[i * 4 + 3] = pixel.m_color[3]; + images[0].m_depths[i] = pixel.m_depth; + } // for each pixel + } +}; + +} +} +} //namespace vtkm::rendering::compositing + +#endif //vtk_m_rendering_compositing_ImageComposititing_h diff --git a/vtkm/rendering/compositing/PNGEncoder.cxx 
b/vtkm/rendering/compositing/PNGEncoder.cxx new file mode 100644 index 000000000..64b920694 --- /dev/null +++ b/vtkm/rendering/compositing/PNGEncoder.cxx @@ -0,0 +1,246 @@ +//============================================================================ +// Copyright (c) Kitware, Inc. +// All rights reserved. +// See LICENSE.txt for details. +// +// This software is distributed WITHOUT ANY WARRANTY; without even +// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR +// PURPOSE. See the above copyright notice for more information. +//============================================================================ + +#include +#include + +#include +#include + +VTKM_THIRDPARTY_PRE_INCLUDE +#include +VTKM_THIRDPARTY_POST_INCLUDE + +namespace vtkm +{ +namespace rendering +{ +namespace compositing +{ + +PNGEncoder::PNGEncoder() + : m_buffer(NULL) + , m_buffer_size(0) +{ +} + +PNGEncoder::~PNGEncoder() +{ + Cleanup(); +} + +void PNGEncoder::Encode(const unsigned char* rgba_in, const int width, const int height) +{ + Cleanup(); + + // upside down relative to what lodepng wants + unsigned char* rgba_flip = new unsigned char[width * height * 4]; + + for (int y = 0; y < height; ++y) + { + memcpy(&(rgba_flip[y * width * 4]), &(rgba_in[(height - y - 1) * width * 4]), width * 4); + } + + vtkm::png::LodePNGState state; + vtkm::png::lodepng_state_init(&state); + // use less aggressive compression + state.encoder.zlibsettings.btype = 2; + state.encoder.zlibsettings.use_lz77 = 0; + + unsigned error = lodepng_encode(&m_buffer, &m_buffer_size, &rgba_flip[0], width, height, &state); + delete[] rgba_flip; + + if (error) + { + std::cerr << "lodepng_encode_memory failed\n"; + } +} + +void PNGEncoder::Encode(const float* rgba_in, const int width, const int height) +{ + Cleanup(); + + // upside down relative to what lodepng wants + unsigned char* rgba_flip = new unsigned char[width * height * 4]; + + + for (int x = 0; x < width; ++x) + +#ifdef VTKH_OPENMP_ENABLED +#pragma omp 
parallel for +#endif + for (int y = 0; y < height; ++y) + { + int inOffset = (y * width + x) * 4; + int outOffset = ((height - y - 1) * width + x) * 4; + rgba_flip[outOffset + 0] = (unsigned char)(rgba_in[inOffset + 0] * 255.f); + rgba_flip[outOffset + 1] = (unsigned char)(rgba_in[inOffset + 1] * 255.f); + rgba_flip[outOffset + 2] = (unsigned char)(rgba_in[inOffset + 2] * 255.f); + rgba_flip[outOffset + 3] = (unsigned char)(rgba_in[inOffset + 3] * 255.f); + } + + vtkm::png::LodePNGState state; + vtkm::png::lodepng_state_init(&state); + // use less aggressive compression + state.encoder.zlibsettings.btype = 2; + state.encoder.zlibsettings.use_lz77 = 0; + + unsigned error = lodepng_encode(&m_buffer, &m_buffer_size, &rgba_flip[0], width, height, &state); + delete[] rgba_flip; + + if (error) + { + std::cerr << "lodepng_encode_memory failed\n"; + } +} + +void PNGEncoder::Encode(const unsigned char* rgba_in, + const int width, + const int height, + const std::vector& comments) +{ + Cleanup(); + + // upside down relative to what lodepng wants + unsigned char* rgba_flip = new unsigned char[width * height * 4]; + + for (int y = 0; y < height; ++y) + { + memcpy(&(rgba_flip[y * width * 4]), &(rgba_in[(height - y - 1) * width * 4]), width * 4); + } + + vtkm::png::LodePNGState state; + vtkm::png::lodepng_state_init(&state); + // use less aggressive compression + state.encoder.zlibsettings.btype = 2; + state.encoder.zlibsettings.use_lz77 = 0; + if (comments.size() % 2 != 0) + { + std::cerr << "PNGEncoder::Encode comments missing value for the last key.\n"; + std::cerr << "Ignoring the last key.\n"; + } + if (comments.size() > 1) + { + vtkm::png::lodepng_info_init(&state.info_png); + // Comments are in pairs with a key and a value, using + // comments.size()-1 ensures that we don't use the last + // comment if the length of the vector isn't a multiple of 2. 
+ for (int i = 0; i < comments.size() - 1; i += 2) + vtkm::png::lodepng_add_text(&state.info_png, comments[i].c_str(), comments[i + 1].c_str()); + } + + unsigned error = + vtkm::png::lodepng_encode(&m_buffer, &m_buffer_size, &rgba_flip[0], width, height, &state); + delete[] rgba_flip; + + if (error) + { + std::cerr << "lodepng_encode_memory failed\n"; + } +} + +void PNGEncoder::Encode(const float* rgba_in, + const int width, + const int height, + const std::vector& comments) +{ + Cleanup(); + + // upside down relative to what lodepng wants + unsigned char* rgba_flip = new unsigned char[width * height * 4]; + + + for (int x = 0; x < width; ++x) + +#ifdef VTKH_OPENMP_ENABLED +#pragma omp parallel for +#endif + for (int y = 0; y < height; ++y) + { + int inOffset = (y * width + x) * 4; + int outOffset = ((height - y - 1) * width + x) * 4; + rgba_flip[outOffset + 0] = (unsigned char)(rgba_in[inOffset + 0] * 255.f); + rgba_flip[outOffset + 1] = (unsigned char)(rgba_in[inOffset + 1] * 255.f); + rgba_flip[outOffset + 2] = (unsigned char)(rgba_in[inOffset + 2] * 255.f); + rgba_flip[outOffset + 3] = (unsigned char)(rgba_in[inOffset + 3] * 255.f); + } + + vtkm::png::LodePNGState state; + vtkm::png::lodepng_state_init(&state); + // use less aggressive compression + state.encoder.zlibsettings.btype = 2; + state.encoder.zlibsettings.use_lz77 = 0; + if (comments.size() % 2 != 0) + { + std::cerr << "PNGEncoder::Encode comments missing value for the last key.\n"; + std::cerr << "Ignoring the last key.\n"; + } + if (comments.size() > 1) + { + vtkm::png::lodepng_info_init(&state.info_png); + // Comments are in pairs with a key and a value, using + // comments.size()-1 ensures that we don't use the last + // comment if the length of the vector isn't a multiple of 2. 
+ for (int i = 0; i < comments.size() - 1; i += 2) + vtkm::png::lodepng_add_text(&state.info_png, comments[i].c_str(), comments[i + 1].c_str()); + } + + unsigned error = + vtkm::png::lodepng_encode(&m_buffer, &m_buffer_size, &rgba_flip[0], width, height, &state); + delete[] rgba_flip; + + if (error) + { + std::cerr << "lodepng_encode_memory failed\n"; + } +} + +void PNGEncoder::Save(const std::string& filename) +{ + if (m_buffer == NULL) + { + std::cerr << "Save must be called after encode()\n"; + /// we have a problem ...! + return; + } + + unsigned error = vtkm::png::lodepng_save_file(m_buffer, m_buffer_size, filename.c_str()); + if (error) + { + std::cerr << "Error saving PNG buffer to file: " << filename << "\n"; + } +} + +void* PNGEncoder::PngBuffer() +{ + return (void*)m_buffer; +} + +size_t PNGEncoder::PngBufferSize() +{ + return m_buffer_size; +} + +void PNGEncoder::Cleanup() +{ + if (m_buffer != NULL) + { + //lodepng_free(m_buffer); + // ^-- Not found even if LODEPNG_COMPILE_ALLOCATORS is defined? + // simply use "free" + free(m_buffer); + m_buffer = NULL; + m_buffer_size = 0; + } +} + +} +} +} //namespace vtkm::rendering::compositing diff --git a/vtkm/rendering/compositing/PNGEncoder.h b/vtkm/rendering/compositing/PNGEncoder.h new file mode 100644 index 000000000..d7f97ca67 --- /dev/null +++ b/vtkm/rendering/compositing/PNGEncoder.h @@ -0,0 +1,57 @@ +//============================================================================ +// Copyright (c) Kitware, Inc. +// All rights reserved. +// See LICENSE.txt for details. +// +// This software is distributed WITHOUT ANY WARRANTY; without even +// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR +// PURPOSE. See the above copyright notice for more information. 
+//============================================================================ + +#ifndef vtk_m_rendering_compositing_PNGEncoder_h +#define vtk_m_rendering_compositing_PNGEncoder_h + +#include +#include + +namespace vtkm +{ +namespace rendering +{ +namespace compositing +{ + +class PNGEncoder +{ +public: + PNGEncoder(); + ~PNGEncoder(); + + void Encode(const unsigned char* rgba_in, const int width, const int height); + void Encode(const float* rgba_in, const int width, const int height); + void Encode(const unsigned char* rgba_in, + const int width, + const int height, + const std::vector& comments); + void Encode(const float* rgba_in, + const int width, + const int height, + const std::vector& comments); + void Save(const std::string& filename); + + void* PngBuffer(); + size_t PngBufferSize(); + + void Cleanup(); + +private: + unsigned char* m_buffer; + size_t m_buffer_size; +}; + + +} +} +} //namespace vtkm::rendering::compositing + +#endif //vtk_m_rendering_compositing_PNGEncoder_h diff --git a/vtkm/rendering/compositing/PayloadCompositor.cxx b/vtkm/rendering/compositing/PayloadCompositor.cxx new file mode 100644 index 000000000..6b51fff9e --- /dev/null +++ b/vtkm/rendering/compositing/PayloadCompositor.cxx @@ -0,0 +1,62 @@ +#include +#include + +#include +#include + +#ifdef VTKH_PARALLEL +#include +#include +#include +#include +#endif + +using namespace vtkm::rendering::compositing; + +namespace vtkh +{ + +PayloadCompositor::PayloadCompositor() {} + +void PayloadCompositor::ClearImages() +{ + m_images.clear(); +} + +void PayloadCompositor::AddImage(PayloadImage& image) +{ + assert(image.GetNumberOfPixels() != 0); + + if (m_images.size() == 0) + { + m_images.push_back(image); + } + else + { + // + // Do local composite and keep a single image + // + PayloadImageCompositor compositor; + compositor.ZBufferComposite(m_images[0], image); + } +} + +PayloadImage PayloadCompositor::Composite() +{ + assert(m_images.size() != 0); + // nothing to do here in serial. 
Images were composited as + // they were added to the compositor +#ifdef VTKH_PARALLEL + vtkhdiy::mpi::communicator diy_comm; + diy_comm = vtkhdiy::mpi::communicator(MPI_Comm_f2c(GetMPICommHandle())); + + assert(m_images.size() == 1); + RadixKCompositor compositor; + compositor.CompositeSurface(diy_comm, this->m_images[0]); +#endif + // Make this a param to avoid the copy? + return m_images[0]; +} + + +} // namespace vtkh diff --git a/vtkm/rendering/compositing/PayloadCompositor.h b/vtkm/rendering/compositing/PayloadCompositor.h new file mode 100644 index 000000000..96f7057d3 --- /dev/null +++ b/vtkm/rendering/compositing/PayloadCompositor.h @@ -0,0 +1,28 @@ +#ifndef VTKH_PAYLOAD_COMPOSITOR_HPP +#define VTKH_PAYLOAD_COMPOSITOR_HPP + +#include + +#include + +namespace vtkh +{ + +class VTKM_RENDERING_EXPORT PayloadCompositor +{ +public: + PayloadCompositor(); + + void ClearImages(); + + void AddImage(vtkm::rendering::compositing::PayloadImage& image); + + vtkm::rendering::compositing::PayloadImage Composite(); + +protected: + std::vector m_images; +}; + +}; + +#endif diff --git a/vtkm/rendering/compositing/PayloadImage.cxx b/vtkm/rendering/compositing/PayloadImage.cxx new file mode 100644 index 000000000..10ff0f89d --- /dev/null +++ b/vtkm/rendering/compositing/PayloadImage.cxx @@ -0,0 +1,33 @@ +//============================================================================ +// Copyright (c) Kitware, Inc. +// All rights reserved. +// See LICENSE.txt for details. +// +// This software is distributed WITHOUT ANY WARRANTY; without even +// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR +// PURPOSE. See the above copyright notice for more information. 
+//============================================================================ + +#include +#include + +namespace vtkm +{ +namespace rendering +{ +namespace compositing +{ + +void PayloadImage::Save(const std::string& name, const std::vector& comments) +{ + PNGEncoder encoder; + encoder.Encode(&m_payloads[0], + m_bounds.X.Max - m_bounds.X.Min + 1, + m_bounds.Y.Max - m_bounds.Y.Min + 1, + comments); + encoder.Save(name); +} + +} +} +} //namespace vtkm::rendering::compositing diff --git a/vtkm/rendering/compositing/PayloadImage.h b/vtkm/rendering/compositing/PayloadImage.h new file mode 100644 index 000000000..501e82715 --- /dev/null +++ b/vtkm/rendering/compositing/PayloadImage.h @@ -0,0 +1,211 @@ +//============================================================================ +// Copyright (c) Kitware, Inc. +// All rights reserved. +// See LICENSE.txt for details. +// +// This software is distributed WITHOUT ANY WARRANTY; without even +// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR +// PURPOSE. See the above copyright notice for more information. +//============================================================================ + +#ifndef vtk_m_rendering_compositing_PayloadImage_h +#define vtk_m_rendering_compositing_PayloadImage_h + +#include + +#include +#include +#include + +namespace vtkm +{ +namespace rendering +{ +namespace compositing +{ + +struct VTKM_RENDERING_EXPORT PayloadImage +{ + // The image bounds are indicated by a grid starting at + // 1-width and 1-height. 
Actual width would be calculated + // m_bounds.X.Max - m_bounds.X.Min + 1 + // 1024 - 1 + 1 = 1024 + vtkm::Bounds m_orig_bounds; + vtkm::Bounds m_bounds; + std::vector m_payloads; + std::vector m_depths; + int m_orig_rank; + int m_payload_bytes; // Size of the payload in bytes + float m_default_value; + + PayloadImage() {} + + PayloadImage(const vtkm::Bounds& bounds, const int payload_bytes) + : m_orig_bounds(bounds) + , m_bounds(bounds) + , m_orig_rank(-1) + , m_payload_bytes(payload_bytes) + { + m_default_value = vtkm::Nan32(); + const int dx = bounds.X.Max - bounds.X.Min + 1; + const int dy = bounds.Y.Max - bounds.Y.Min + 1; + m_payloads.resize(dx * dy * m_payload_bytes); + m_depths.resize(dx * dy); + } + + void InitOriginal(const PayloadImage& other) + { + m_orig_bounds = other.m_orig_bounds; + m_bounds = other.m_orig_bounds; + m_payload_bytes = other.m_payload_bytes; + m_default_value = other.m_default_value; + + const int dx = m_bounds.X.Max - m_bounds.X.Min + 1; + const int dy = m_bounds.Y.Max - m_bounds.Y.Min + 1; + m_payloads.resize(dx * dy * m_payload_bytes); + m_depths.resize(dx * dy); + + m_orig_rank = -1; + } + + int GetNumberOfPixels() const { return static_cast(m_depths.size()); } + + void Init(const unsigned char* payload_buffer, const float* depth_buffer, int width, int height) + { + m_bounds.X.Min = 1; + m_bounds.Y.Min = 1; + m_bounds.X.Max = width; + m_bounds.Y.Max = height; + m_orig_bounds = m_bounds; + const int size = width * height; + m_payloads.resize(size * m_payload_bytes); + m_depths.resize(size); + + std::copy(payload_buffer, payload_buffer + size * m_payload_bytes, &m_payloads[0]); + + std::copy(depth_buffer, depth_buffer + size, &m_depths[0]); + } + + // + // Fill this image with a sub-region of another image + // + void SubsetFrom(const PayloadImage& image, const vtkm::Bounds& sub_region) + { + m_orig_bounds = image.m_orig_bounds; + m_bounds = sub_region; + m_orig_rank = image.m_orig_rank; + m_payload_bytes = image.m_payload_bytes; + 
+ assert(sub_region.X.Min >= image.m_bounds.X.Min); + assert(sub_region.Y.Min >= image.m_bounds.Y.Min); + assert(sub_region.X.Max <= image.m_bounds.X.Max); + assert(sub_region.Y.Max <= image.m_bounds.Y.Max); + + const int s_dx = m_bounds.X.Max - m_bounds.X.Min + 1; + const int s_dy = m_bounds.Y.Max - m_bounds.Y.Min + 1; + + const int dx = image.m_bounds.X.Max - image.m_bounds.X.Min + 1; + //const int dy = image.m_bounds.Y.Max - image.m_bounds.Y.Min + 1; + + const int start_x = m_bounds.X.Min - image.m_bounds.X.Min; + const int start_y = m_bounds.Y.Min - image.m_bounds.Y.Min; + const int end_y = start_y + s_dy; + + size_t buffer_size = s_dx * s_dy * m_payload_bytes; + + m_payloads.resize(buffer_size); + m_depths.resize(s_dx * s_dy); + + +#ifdef VTKH_OPENMP_ENABLED +#pragma omp parallel for +#endif + for (int y = start_y; y < end_y; ++y) + { + const int copy_to = (y - start_y) * s_dx; + const int copy_from = y * dx + start_x; + + std::copy(&image.m_payloads[copy_from * m_payload_bytes], + &image.m_payloads[copy_from * m_payload_bytes] + s_dx * m_payload_bytes, + &m_payloads[copy_to * m_payload_bytes]); + std::copy(&image.m_depths[copy_from], &image.m_depths[copy_from] + s_dx, &m_depths[copy_to]); + } + } + + // + // Fills the passed in image with the contents of this image + // + void SubsetTo(PayloadImage& image) const + { + assert(m_bounds.X.Min >= image.m_bounds.X.Min); + assert(m_bounds.Y.Min >= image.m_bounds.Y.Min); + assert(m_bounds.X.Max <= image.m_bounds.X.Max); + assert(m_bounds.Y.Max <= image.m_bounds.Y.Max); + + const int s_dx = m_bounds.X.Max - m_bounds.X.Min + 1; + const int s_dy = m_bounds.Y.Max - m_bounds.Y.Min + 1; + + const int dx = image.m_bounds.X.Max - image.m_bounds.X.Min + 1; + //const int dy = image.m_bounds.Y.Max - image.m_bounds.Y.Min + 1; + + const int start_x = m_bounds.X.Min - image.m_bounds.X.Min; + const int start_y = m_bounds.Y.Min - image.m_bounds.Y.Min; + +#ifdef VTKH_OPENMP_ENABLED +#pragma omp parallel for +#endif + for (int y = 0; 
y < s_dy; ++y) + { + const int copy_to = (y + start_y) * dx + start_x; + const int copy_from = y * s_dx; + + std::copy(&m_payloads[copy_from * m_payload_bytes], + &m_payloads[copy_from * m_payload_bytes] + s_dx * m_payload_bytes, + &image.m_payloads[copy_to * m_payload_bytes]); + + std::copy(&m_depths[copy_from], &m_depths[copy_from] + s_dx, &image.m_depths[copy_to]); + } + } + + void Swap(PayloadImage& other) + { + vtkm::Bounds orig = m_orig_bounds; + vtkm::Bounds bounds = m_bounds; + + m_orig_bounds = other.m_orig_bounds; + m_bounds = other.m_bounds; + + other.m_orig_bounds = orig; + other.m_bounds = bounds; + + m_payloads.swap(other.m_payloads); + m_depths.swap(other.m_depths); + } + + void Clear() + { + vtkm::Bounds empty; + m_orig_bounds = empty; + m_bounds = empty; + m_payloads.clear(); + m_depths.clear(); + } + + std::string ToString() const + { + std::stringstream ss; + ss << "Total size pixels " << (int)m_depths.size(); + ss << " tile dims: {" << m_bounds.X.Min << "," << m_bounds.Y.Min << "} - "; + ss << "{" << m_bounds.X.Max << "," << m_bounds.Y.Max << "}\n"; + ; + return ss.str(); + } + + void Save(const std::string& name, const std::vector& comments); +}; + +} +} +} //namespace vtkm::rendering::compositing + +#endif //vtk_m_rendering_compositing_PayloadImage_h diff --git a/vtkm/rendering/compositing/PayloadImageCompositor.h b/vtkm/rendering/compositing/PayloadImageCompositor.h new file mode 100644 index 000000000..8ea3c4f4a --- /dev/null +++ b/vtkm/rendering/compositing/PayloadImageCompositor.h @@ -0,0 +1,74 @@ +//============================================================================ +// Copyright (c) Kitware, Inc. +// All rights reserved. +// See LICENSE.txt for details. +// +// This software is distributed WITHOUT ANY WARRANTY; without even +// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR +// PURPOSE. See the above copyright notice for more information. 
+//============================================================================ + +#ifndef vtk_m_rendering_compositing_PayloadImageCompositor_h +#define vtk_m_rendering_compositing_PayloadImageCompositor_h + +#include + + +#include +#include +#include + + +namespace vtkm +{ +namespace rendering +{ +namespace compositing +{ + +class VTKM_RENDERING_EXPORT PayloadImageCompositor +{ +public: + void ZBufferComposite(vtkm::rendering::compositing::PayloadImage& front, + const vtkm::rendering::compositing::PayloadImage& image) + { + if (front.m_payload_bytes != image.m_payload_bytes) + { + std::cout << "very bad\n"; + } + assert(front.m_depths.size() == front.m_payloads.size() / front.m_payload_bytes); + assert(front.m_bounds.X.Min == image.m_bounds.X.Min); + assert(front.m_bounds.Y.Min == image.m_bounds.Y.Min); + assert(front.m_bounds.X.Max == image.m_bounds.X.Max); + assert(front.m_bounds.Y.Max == image.m_bounds.Y.Max); + + const int size = static_cast(front.m_depths.size()); + const bool nan_check = image.m_default_value != image.m_default_value; +#ifdef VTKH_OPENMP_ENABLED +#pragma omp parallel for +#endif + for (int i = 0; i < size; ++i) + { + const float depth = image.m_depths[i]; + const float fdepth = front.m_depths[i]; + // this should handle NaNs correctly + const bool take_back = fmin(depth, fdepth) == depth; + + if (take_back) + { + const int offset = i * 4; + front.m_depths[i] = depth; + const size_t p_offset = i * front.m_payload_bytes; + std::copy(&image.m_payloads[p_offset], + &image.m_payloads[p_offset] + front.m_payload_bytes, + &front.m_payloads[p_offset]); + } + } + } +}; + +} +} +} //namespace vtkm::rendering::compositing + +#endif //vtk_m_rendering_compositing_PayloadImageCompositor_h diff --git a/vtkm/rendering/compositing/RadixKCompositor.cxx b/vtkm/rendering/compositing/RadixKCompositor.cxx new file mode 100644 index 000000000..1bbdc2ace --- /dev/null +++ b/vtkm/rendering/compositing/RadixKCompositor.cxx @@ -0,0 +1,208 @@ 
+//============================================================================ +// Copyright (c) Kitware, Inc. +// All rights reserved. +// See LICENSE.txt for details. +// +// This software is distributed WITHOUT ANY WARRANTY; without even +// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR +// PURPOSE. See the above copyright notice for more information. +//============================================================================ + +#include +#include +#include +#include +//#include +#include +#include +#include + +/* +#include +#include +#include +#include +#include +*/ + +namespace vtkm +{ +namespace rendering +{ +namespace compositing +{ + +template +void DepthComposite(ImageType& front, ImageType& back); + +template <> +void DepthComposite(PayloadImage& front, PayloadImage& back) +{ + vtkm::rendering::compositing::PayloadImageCompositor compositor; + compositor.ZBufferComposite(front, back); +} + +template <> +void DepthComposite(vtkm::rendering::compositing::Image& front, + vtkm::rendering::compositing::Image& back) +{ + vtkm::rendering::compositing::ImageCompositor compositor; + compositor.ZBufferComposite(front, back); +} + +template +void reduce_images(void* b, + const vtkmdiy::ReduceProxy& proxy, + const vtkmdiy::RegularSwapPartners& partners) +{ + ImageBlock* block = reinterpret_cast*>(b); + unsigned int round = proxy.round(); + ImageType& image = block->m_image; + // count the number of incoming pixels + if (proxy.in_link().size() > 0) + { + for (int i = 0; i < proxy.in_link().size(); ++i) + { + int gid = proxy.in_link().target(i).gid; + if (gid == proxy.gid()) + { + //skip revieving from self since we sent nothing + continue; + } + ImageType incoming; + proxy.dequeue(gid, incoming); + DepthComposite(image, incoming); + } // for in links + } + + if (proxy.out_link().size() == 0) + { + return; + } + // do compositing?? intermediate stage? 
+ const int group_size = proxy.out_link().size(); + const int current_dim = partners.dim(round); + + //create balanced set of ranges for current dim + vtkmdiy::DiscreteBounds image_bounds = vtkh::VTKMBoundsToDIY(image.m_bounds); + int range_length = image_bounds.max[current_dim] - image_bounds.min[current_dim]; + int base_step = range_length / group_size; + int rem = range_length % group_size; + std::vector bucket_sizes(group_size, base_step); + for (int i = 0; i < rem; ++i) + { + bucket_sizes[i]++; + } + + int count = 0; + for (int i = 0; i < group_size; ++i) + { + count += bucket_sizes[i]; + } + assert(count == range_length); + + std::vector subset_bounds(group_size, + vtkh::VTKMBoundsToDIY(image.m_bounds)); + int min_pixel = image_bounds.min[current_dim]; + for (int i = 0; i < group_size; ++i) + { + subset_bounds[i].min[current_dim] = min_pixel; + subset_bounds[i].max[current_dim] = min_pixel + bucket_sizes[i]; + min_pixel += bucket_sizes[i]; + } + + //debug + if (group_size > 1) + { + for (int i = 1; i < group_size; ++i) + { + assert(subset_bounds[i - 1].max[current_dim] == subset_bounds[i].min[current_dim]); + } + + assert(subset_bounds[0].min[current_dim] == image_bounds.min[current_dim]); + assert(subset_bounds[group_size - 1].max[current_dim] == image_bounds.max[current_dim]); + } + + std::vector out_images(group_size); + for (int i = 0; i < group_size; ++i) + { + out_images[i].SubsetFrom(image, vtkh::DIYBoundsToVTKM(subset_bounds[i])); + } //for + + for (int i = 0; i < group_size; ++i) + { + if (proxy.out_link().target(i).gid == proxy.gid()) + { + image.Swap(out_images[i]); + } + else + { + proxy.enqueue(proxy.out_link().target(i), out_images[i]); + } + } //for + +} // reduce images + +RadixKCompositor::RadixKCompositor() {} + +RadixKCompositor::~RadixKCompositor() {} + +template +void RadixKCompositor::CompositeImpl(vtkmdiy::mpi::communicator& diy_comm, ImageType& image) +{ + vtkmdiy::DiscreteBounds global_bounds = 
vtkh::VTKMBoundsToDIY(image.m_orig_bounds); + + // tells diy to use one thread + const int num_threads = 1; + const int num_blocks = diy_comm.size(); + const int magic_k = 8; + + vtkmdiy::Master master(diy_comm, num_threads, -1, 0, [](void* b) { + ImageBlock* block = reinterpret_cast*>(b); + delete block; + }); + + // create an assigner with one block per rank + vtkmdiy::ContiguousAssigner assigner(num_blocks, num_blocks); + vtkm::rendering::compositing::AddImageBlock create(master, image); + const int num_dims = 2; + vtkmdiy::RegularDecomposer decomposer( + num_dims, global_bounds, num_blocks); + decomposer.decompose(diy_comm.rank(), assigner, create); + vtkmdiy::RegularSwapPartners partners(decomposer, magic_k, + false); // false == distance halving + vtkmdiy::reduce(master, assigner, partners, reduce_images); + + + //MPICollect(image, diy_comm); + vtkmdiy::all_to_all( + master, assigner, vtkm::rendering::compositing::CollectImages(decomposer), magic_k); + + if (diy_comm.rank() == 0) + { + master.prof.output(m_timing_log); + } +} + +void RadixKCompositor::CompositeSurface(vtkmdiy::mpi::communicator& diy_comm, + vtkm::rendering::compositing::Image& image) +{ + CompositeImpl(diy_comm, image); +} + +void RadixKCompositor::CompositeSurface(vtkmdiy::mpi::communicator& diy_comm, + vtkm::rendering::compositing::PayloadImage& image) +{ + CompositeImpl(diy_comm, image); +} + +std::string RadixKCompositor::GetTimingString() +{ + std::string res(m_timing_log.str()); + m_timing_log.str(""); + return res; +} + +} +} +} //namespace vtkm::rendering::compositing diff --git a/vtkm/rendering/compositing/RadixKCompositor.h b/vtkm/rendering/compositing/RadixKCompositor.h new file mode 100644 index 000000000..cd94c84ce --- /dev/null +++ b/vtkm/rendering/compositing/RadixKCompositor.h @@ -0,0 +1,54 @@ +//============================================================================ +// Copyright (c) Kitware, Inc. +// All rights reserved. +// See LICENSE.txt for details. 
+// +// This software is distributed WITHOUT ANY WARRANTY; without even +// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR +// PURPOSE. See the above copyright notice for more information. +//============================================================================ + +#ifndef vtk_m_rendering_compositing_RadixKCompositor_h +#define vtk_m_rendering_compositing_RadixKCompositor_h + +#include + +#include +#include + +#include +#ifdef VTKM_ENABLE_MPI +//#include +//#include +//#include +#endif + +namespace vtkm +{ +namespace rendering +{ +namespace compositing +{ + +class VTKM_RENDERING_EXPORT RadixKCompositor +{ +public: + RadixKCompositor(); + ~RadixKCompositor(); + void CompositeSurface(vtkmdiy::mpi::communicator& diy_comm, Image& image); + void CompositeSurface(vtkmdiy::mpi::communicator& diy_comm, PayloadImage& image); + + template + void CompositeImpl(vtkmdiy::mpi::communicator& diy_comm, ImageType& image); + + std::string GetTimingString(); + +private: + std::stringstream m_timing_log; +}; + +} +} +} //namespace vtkm::rendering::compositing + +#endif //vtk_m_rendering_compositing_RadixKCompositor_h diff --git a/vtkm/rendering/compositing/vtkm_diy_collect.h b/vtkm/rendering/compositing/vtkm_diy_collect.h new file mode 100644 index 000000000..188c72949 --- /dev/null +++ b/vtkm/rendering/compositing/vtkm_diy_collect.h @@ -0,0 +1,88 @@ +//============================================================================ +// Copyright (c) Kitware, Inc. +// All rights reserved. +// See LICENSE.txt for details. +// +// This software is distributed WITHOUT ANY WARRANTY; without even +// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR +// PURPOSE. See the above copyright notice for more information. 
+//============================================================================ + +#ifndef vtk_m_rendering_compositing_vtkm_diy_collect_h +#define vtk_m_rendering_compositing_vtkm_diy_collect_h + +#include + +#include +#include +#include +#include +#include +#include + +namespace vtkm +{ +namespace rendering +{ +namespace compositing +{ + +template +struct CollectImages +{ + const vtkmdiy::RegularDecomposer& m_decomposer; + + CollectImages(const vtkmdiy::RegularDecomposer& decomposer) + : m_decomposer(decomposer) + { + } + + void operator()(void* b, const vtkmdiy::ReduceProxy& proxy) const + { + ImageBlock* block = reinterpret_cast*>(b); + // + // first round we have no incoming. Take the images we have + // and sent them to to the right rank + // + const int collection_rank = 0; + if (proxy.in_link().size() == 0) + { + + if (proxy.gid() != collection_rank) + { + int dest_gid = collection_rank; + vtkmdiy::BlockID dest = proxy.out_link().target(dest_gid); + + proxy.enqueue(dest, block->m_image); + block->m_image.Clear(); + } + } // if + else if (proxy.gid() == collection_rank) + { + ImageType final_image; + final_image.InitOriginal(block->m_image); + block->m_image.SubsetTo(final_image); + + for (int i = 0; i < proxy.in_link().size(); ++i) + { + int gid = proxy.in_link().target(i).gid; + + if (gid == collection_rank) + { + continue; + } + ImageType incoming; + proxy.dequeue(gid, incoming); + incoming.SubsetTo(final_image); + } // for + block->m_image.Swap(final_image); + } // else + + } // operator +}; + +} +} +} //namespace vtkm::rendering::compositing + +#endif //vtk_m_rendering_compositing_vtkm_diy_collect_h diff --git a/vtkm/rendering/compositing/vtkm_diy_image_block.h b/vtkm/rendering/compositing/vtkm_diy_image_block.h new file mode 100644 index 000000000..2e9120946 --- /dev/null +++ b/vtkm/rendering/compositing/vtkm_diy_image_block.h @@ -0,0 +1,194 @@ +#ifndef VTKH_DIY_IMAGE_BLOCK_HPP +#define VTKH_DIY_IMAGE_BLOCK_HPP + +#include +#include +#include + 
+namespace vtkm +{ +namespace rendering +{ +namespace compositing +{ + +template +struct ImageBlock +{ + ImageType& m_image; + ImageBlock(ImageType& image) + : m_image(image) + { + } +}; + +struct MultiImageBlock +{ + std::vector& m_images; + vtkm::rendering::compositing::Image& m_output; + MultiImageBlock(std::vector& images, + vtkm::rendering::compositing::Image& output) + : m_images(images) + , m_output(output) + { + } +}; + +template +struct AddImageBlock +{ + ImageType& m_image; + const vtkmdiy::Master& m_master; + + AddImageBlock(vtkmdiy::Master& master, ImageType& image) + : m_image(image) + , m_master(master) + { + } + template + void operator()(int gid, + const BoundsType&, // local_bounds + const BoundsType&, // local_with_ghost_bounds + const BoundsType&, // domain_bounds + const LinkType& link) const + { + ImageBlock* block = new ImageBlock(m_image); + LinkType* linked = new LinkType(link); + vtkmdiy::Master& master = const_cast(m_master); + master.add(gid, block, linked); + } +}; + +struct AddMultiImageBlock +{ + std::vector& m_images; + vtkm::rendering::compositing::Image& m_output; + const vtkmdiy::Master& m_master; + + AddMultiImageBlock(vtkmdiy::Master& master, + std::vector& images, + vtkm::rendering::compositing::Image& output) + : m_master(master) + , m_images(images) + , m_output(output) + { + } + template + void operator()(int gid, + const BoundsType&, // local_bounds + const BoundsType&, // local_with_ghost_bounds + const BoundsType&, // domain_bounds + const LinkType& link) const + { + MultiImageBlock* block = new MultiImageBlock(m_images, m_output); + LinkType* linked = new LinkType(link); + vtkmdiy::Master& master = const_cast(m_master); + int lid = master.add(gid, block, linked); + } +}; + +} +} +} //namespace vtkm::rendering::compositing + +namespace vtkmdiy +{ + +template <> +struct Serialization +{ + static void save(BinaryBuffer& bb, const vtkm::rendering::compositing::PayloadImage& image) + { + vtkmdiy::save(bb, 
image.m_orig_bounds.X.Min); + vtkmdiy::save(bb, image.m_orig_bounds.Y.Min); + vtkmdiy::save(bb, image.m_orig_bounds.Z.Min); + vtkmdiy::save(bb, image.m_orig_bounds.X.Max); + vtkmdiy::save(bb, image.m_orig_bounds.Y.Max); + vtkmdiy::save(bb, image.m_orig_bounds.Z.Max); + + vtkmdiy::save(bb, image.m_bounds.X.Min); + vtkmdiy::save(bb, image.m_bounds.Y.Min); + vtkmdiy::save(bb, image.m_bounds.Z.Min); + vtkmdiy::save(bb, image.m_bounds.X.Max); + vtkmdiy::save(bb, image.m_bounds.Y.Max); + vtkmdiy::save(bb, image.m_bounds.Z.Max); + + vtkmdiy::save(bb, image.m_payloads); + vtkmdiy::save(bb, image.m_payload_bytes); + vtkmdiy::save(bb, image.m_depths); + vtkmdiy::save(bb, image.m_orig_rank); + } + + static void load(BinaryBuffer& bb, vtkm::rendering::compositing::PayloadImage& image) + { + vtkmdiy::load(bb, image.m_orig_bounds.X.Min); + vtkmdiy::load(bb, image.m_orig_bounds.Y.Min); + vtkmdiy::load(bb, image.m_orig_bounds.Z.Min); + vtkmdiy::load(bb, image.m_orig_bounds.X.Max); + vtkmdiy::load(bb, image.m_orig_bounds.Y.Max); + vtkmdiy::load(bb, image.m_orig_bounds.Z.Max); + + vtkmdiy::load(bb, image.m_bounds.X.Min); + vtkmdiy::load(bb, image.m_bounds.Y.Min); + vtkmdiy::load(bb, image.m_bounds.Z.Min); + vtkmdiy::load(bb, image.m_bounds.X.Max); + vtkmdiy::load(bb, image.m_bounds.Y.Max); + vtkmdiy::load(bb, image.m_bounds.Z.Max); + + vtkmdiy::load(bb, image.m_payloads); + vtkmdiy::load(bb, image.m_payload_bytes); + vtkmdiy::load(bb, image.m_depths); + vtkmdiy::load(bb, image.m_orig_rank); + } +}; + +template <> +struct Serialization +{ + static void save(BinaryBuffer& bb, const vtkm::rendering::compositing::Image& image) + { + vtkmdiy::save(bb, image.m_orig_bounds.X.Min); + vtkmdiy::save(bb, image.m_orig_bounds.Y.Min); + vtkmdiy::save(bb, image.m_orig_bounds.Z.Min); + vtkmdiy::save(bb, image.m_orig_bounds.X.Max); + vtkmdiy::save(bb, image.m_orig_bounds.Y.Max); + vtkmdiy::save(bb, image.m_orig_bounds.Z.Max); + + vtkmdiy::save(bb, image.m_bounds.X.Min); + vtkmdiy::save(bb, 
image.m_bounds.Y.Min); + vtkmdiy::save(bb, image.m_bounds.Z.Min); + vtkmdiy::save(bb, image.m_bounds.X.Max); + vtkmdiy::save(bb, image.m_bounds.Y.Max); + vtkmdiy::save(bb, image.m_bounds.Z.Max); + + vtkmdiy::save(bb, image.m_pixels); + vtkmdiy::save(bb, image.m_depths); + vtkmdiy::save(bb, image.m_orig_rank); + vtkmdiy::save(bb, image.m_composite_order); + } + + static void load(BinaryBuffer& bb, vtkm::rendering::compositing::Image& image) + { + vtkmdiy::load(bb, image.m_orig_bounds.X.Min); + vtkmdiy::load(bb, image.m_orig_bounds.Y.Min); + vtkmdiy::load(bb, image.m_orig_bounds.Z.Min); + vtkmdiy::load(bb, image.m_orig_bounds.X.Max); + vtkmdiy::load(bb, image.m_orig_bounds.Y.Max); + vtkmdiy::load(bb, image.m_orig_bounds.Z.Max); + + vtkmdiy::load(bb, image.m_bounds.X.Min); + vtkmdiy::load(bb, image.m_bounds.Y.Min); + vtkmdiy::load(bb, image.m_bounds.Z.Min); + vtkmdiy::load(bb, image.m_bounds.X.Max); + vtkmdiy::load(bb, image.m_bounds.Y.Max); + vtkmdiy::load(bb, image.m_bounds.Z.Max); + + vtkmdiy::load(bb, image.m_pixels); + vtkmdiy::load(bb, image.m_depths); + vtkmdiy::load(bb, image.m_orig_rank); + vtkmdiy::load(bb, image.m_composite_order); + } +}; +} //namespace vtkmdiy + +#endif diff --git a/vtkm/rendering/compositing/vtkm_diy_partial_blocks.h b/vtkm/rendering/compositing/vtkm_diy_partial_blocks.h new file mode 100644 index 000000000..bb2d82f55 --- /dev/null +++ b/vtkm/rendering/compositing/vtkm_diy_partial_blocks.h @@ -0,0 +1,216 @@ +//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~// +// Copyright (c) 2018, Lawrence Livermore National Security, LLC. +// +// Produced at the Lawrence Livermore National Laboratory +// +// LLNL-CODE-749865 +// +// All rights reserved. +// +// This file is part of Rover. 
+// +// Please also read rover/LICENSE +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the disclaimer below. +// +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the disclaimer (as noted below) in the +// documentation and/or other materials provided with the distribution. +// +// * Neither the name of the LLNS/LLNL nor the names of its contributors may +// be used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, +// LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY +// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +// OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING +// IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. 
+// +//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~// +#ifndef rover_blocks_h +#define rover_blocks_h + +#include + +#include "AbsorptionPartial.hpp" +#include "EmissionPartial.hpp" +#include "VolumePartial.hpp" + +namespace vtkh +{ + +//--------------------------------------Volume Block Structure----------------------------------- +template +struct VolumeBlock +{ + typedef vtkhdiy::DiscreteBounds Bounds; + typedef VolumePartial PartialType; + std::vector>& m_partials; + VolumeBlock(std::vector>& partials) + : m_partials(partials) + { + } +}; + + +//--------------------------------------Absorption Block Structure------------------------------ +template +struct AbsorptionBlock +{ + typedef vtkhdiy::DiscreteBounds Bounds; + typedef AbsorptionPartial PartialType; + std::vector>& m_partials; + + AbsorptionBlock(std::vector>& partials) + : m_partials(partials) + { + } +}; + +//--------------------------------------Emission Block Structure------------------------------ +template +struct EmissionBlock +{ + typedef vtkhdiy::DiscreteBounds Bounds; + typedef EmissionPartial PartialType; + std::vector>& m_partials; + + EmissionBlock(std::vector>& partials) + : m_partials(partials) + { + } +}; + +//--------------------------------------Add Block Template----------------------------------- +template +struct AddBlock +{ + typedef typename BlockType::PartialType PartialType; + typedef BlockType Block; + std::vector& m_partials; + const vtkhdiy::Master& m_master; + + AddBlock(vtkhdiy::Master& master, std::vector& partials) + : m_master(master) + , m_partials(partials) + { + } + template + void operator()(int gid, + const BoundsType& local_bounds, + const BoundsType& local_with_ghost_bounds, + const BoundsType& domain_bounds, + const LinkType& link) const + { + (void)local_bounds; + (void)domain_bounds; + (void)local_with_ghost_bounds; + Block* block = new Block(m_partials); + LinkType* rg_link = new LinkType(link); + vtkhdiy::Master& master = 
const_cast(m_master); + int lid = master.add(gid, block, rg_link); + (void)lid; + } +}; + +} //namespace vtkh + +//-------------------------------Serialization Specializations-------------------------------- +namespace vtkhdiy +{ + +template <> +struct Serialization> +{ + + static void save(BinaryBuffer& bb, const vtkh::AbsorptionPartial& partial) + { + vtkhdiy::save(bb, partial.m_bins); + vtkhdiy::save(bb, partial.m_pixel_id); + vtkhdiy::save(bb, partial.m_depth); + } + + static void load(BinaryBuffer& bb, vtkh::AbsorptionPartial& partial) + { + vtkhdiy::load(bb, partial.m_bins); + vtkhdiy::load(bb, partial.m_pixel_id); + vtkhdiy::load(bb, partial.m_depth); + } +}; + +template <> +struct Serialization> +{ + + static void save(BinaryBuffer& bb, const vtkh::AbsorptionPartial& partial) + { + vtkhdiy::save(bb, partial.m_bins); + vtkhdiy::save(bb, partial.m_pixel_id); + vtkhdiy::save(bb, partial.m_depth); + } + + static void load(BinaryBuffer& bb, vtkh::AbsorptionPartial& partial) + { + vtkhdiy::load(bb, partial.m_bins); + vtkhdiy::load(bb, partial.m_pixel_id); + vtkhdiy::load(bb, partial.m_depth); + } +}; + +template <> +struct Serialization> +{ + + static void save(BinaryBuffer& bb, const vtkh::EmissionPartial& partial) + { + vtkhdiy::save(bb, partial.m_bins); + vtkhdiy::save(bb, partial.m_emission_bins); + vtkhdiy::save(bb, partial.m_pixel_id); + vtkhdiy::save(bb, partial.m_depth); + } + + static void load(BinaryBuffer& bb, vtkh::EmissionPartial& partial) + { + vtkhdiy::load(bb, partial.m_bins); + vtkhdiy::load(bb, partial.m_emission_bins); + vtkhdiy::load(bb, partial.m_pixel_id); + vtkhdiy::load(bb, partial.m_depth); + } +}; + +template <> +struct Serialization> +{ + + static void save(BinaryBuffer& bb, const vtkh::EmissionPartial& partial) + { + vtkhdiy::save(bb, partial.m_bins); + vtkhdiy::save(bb, partial.m_emission_bins); + vtkhdiy::save(bb, partial.m_pixel_id); + vtkhdiy::save(bb, partial.m_depth); + } + + static void load(BinaryBuffer& bb, 
vtkh::EmissionPartial& partial) + { + vtkhdiy::load(bb, partial.m_bins); + vtkhdiy::load(bb, partial.m_emission_bins); + vtkhdiy::load(bb, partial.m_pixel_id); + vtkhdiy::load(bb, partial.m_depth); + } +}; + +} // namespace diy + +#endif diff --git a/vtkm/rendering/compositing/vtkm_diy_partial_collect.h b/vtkm/rendering/compositing/vtkm_diy_partial_collect.h new file mode 100644 index 000000000..48eb1b629 --- /dev/null +++ b/vtkm/rendering/compositing/vtkm_diy_partial_collect.h @@ -0,0 +1,185 @@ +//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~// +// Copyright (c) 2018, Lawrence Livermore National Security, LLC. +// +// Produced at the Lawrence Livermore National Laboratory +// +// LLNL-CODE-749865 +// +// All rights reserved. +// +// This file is part of Rover. +// +// Please also read rover/LICENSE +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the disclaimer below. +// +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the disclaimer (as noted below) in the +// documentation and/or other materials provided with the distribution. +// +// * Neither the name of the LLNS/LLNL nor the names of its contributors may +// be used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, +// LLC, THE U.S. 
DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY +// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +// OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING +// IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~// +#ifndef rover_compositing_collect_h +#define rover_compositing_collect_h + +#include "AbsorptionPartial.hpp" +#include "EmissionPartial.hpp" +#include "VolumePartial.hpp" +#include +#include +#include +#include + +namespace vtkh +{ +// +// Collect struct sends all data to a single node. +// +template +struct Collect +{ + const vtkhdiy::RegularDecomposer& m_decomposer; + + Collect(const vtkhdiy::RegularDecomposer& decomposer) + : m_decomposer(decomposer) + { + } + + void operator()(void* v_block, const vtkhdiy::ReduceProxy& proxy) const + { + BlockType* block = static_cast(v_block); + // + // first round we have no incoming. 
Take the partials we have + // and sent them to to the right rank + // + const int collection_rank = 0; + if (proxy.in_link().size() == 0 && proxy.gid() != collection_rank) + { + int dest_gid = collection_rank; + vtkhdiy::BlockID dest = proxy.out_link().target(dest_gid); + proxy.enqueue(dest, block->m_partials); + + block->m_partials.clear(); + + } // if + else if (proxy.gid() == collection_rank) + { + + for (int i = 0; i < proxy.in_link().size(); ++i) + { + int gid = proxy.in_link().target(i).gid; + if (gid == collection_rank) + { + continue; + } + //TODO: leave the paritals that start here, here + std::vector incoming_partials; + proxy.dequeue(gid, incoming_partials); + const int incoming_size = incoming_partials.size(); + // TODO: make this a std::copy + for (int j = 0; j < incoming_size; ++j) + { + block->m_partials.push_back(incoming_partials[j]); + } + } // for + } // else + + } // operator +}; + +// +// collect uses the all-to-all construct to perform a gather to +// the root rank. 
All other ranks will have no data +// +template +void collect_detail(std::vector& partials, MPI_Comm comm) +{ + typedef typename AddBlockType::Block Block; + + vtkhdiy::mpi::communicator world(comm); + vtkhdiy::ContinuousBounds global_bounds; + global_bounds.min[0] = 0; + global_bounds.max[0] = 1; + + // tells diy to use all availible threads + const int num_threads = -1; + const int num_blocks = world.size(); + const int magic_k = 2; + + vtkhdiy::Master master(world, num_threads); + + // create an assigner with one block per rank + vtkhdiy::ContiguousAssigner assigner(num_blocks, num_blocks); + AddBlockType create(master, partials); + + const int dims = 1; + vtkhdiy::RegularDecomposer decomposer(dims, global_bounds, num_blocks); + decomposer.decompose(world.rank(), assigner, create); + + vtkhdiy::all_to_all(master, assigner, Collect(decomposer), magic_k); +} + +template +void collect(std::vector& partials, MPI_Comm comm); + +template <> +void collect>(std::vector>& partials, MPI_Comm comm) +{ + collect_detail>>(partials, comm); +} + +template <> +void collect>(std::vector>& partials, MPI_Comm comm) +{ + collect_detail>>(partials, comm); +} + +template <> +void collect>(std::vector>& partials, + MPI_Comm comm) +{ + collect_detail>>(partials, comm); +} + +template <> +void collect>(std::vector>& partials, + MPI_Comm comm) +{ + collect_detail>>(partials, comm); +} + +template <> +void collect>(std::vector>& partials, MPI_Comm comm) +{ + collect_detail>>(partials, comm); +} + +template <> +void collect>(std::vector>& partials, MPI_Comm comm) +{ + collect_detail>>(partials, comm); +} + +} // namespace rover + +#endif diff --git a/vtkm/rendering/compositing/vtkm_diy_partial_redistribute.h b/vtkm/rendering/compositing/vtkm_diy_partial_redistribute.h new file mode 100644 index 000000000..7bad6e7c0 --- /dev/null +++ b/vtkm/rendering/compositing/vtkm_diy_partial_redistribute.h @@ -0,0 +1,226 @@ +//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~// 
+// Copyright (c) 2018, Lawrence Livermore National Security, LLC. +// +// Produced at the Lawrence Livermore National Laboratory +// +// LLNL-CODE-749865 +// +// All rights reserved. +// +// This file is part of Rover. +// +// Please also read rover/LICENSE +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the disclaimer below. +// +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the disclaimer (as noted below) in the +// documentation and/or other materials provided with the distribution. +// +// * Neither the name of the LLNS/LLNL nor the names of its contributors may +// be used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, +// LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY +// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +// OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING +// IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. 
+// +//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~// +#ifndef rover_compositing_redistribute_h +#define rover_compositing_redistribute_h + +#include "vtkh_diy_partial_blocks.hpp" +#include +#include +#include +#include +#include + +namespace vtkh +{ +// +// Redistributes partial composites to the ranks that owns +// that sectoon of the image. Currently, the domain is decomposed +// in 1-D from min_pixel to max_pixel. +// +template +struct Redistribute +{ + const vtkhdiy::RegularDecomposer& m_decomposer; + + Redistribute(const vtkhdiy::RegularDecomposer& decomposer) + : m_decomposer(decomposer) + { + } + + void operator()(void* v_block, const vtkhdiy::ReduceProxy& proxy) const + { + BlockType* block = static_cast(v_block); + // + // first round we have no incoming. Take the partials we have + // and sent them to to the right rank + // + if (proxy.in_link().size() == 0) + { + const int size = block->m_partials.size(); + std::map> outgoing; + + for (int i = 0; i < size; ++i) + { + vtkhdiy::Point point; + point[0] = block->m_partials[i].m_pixel_id; + int dest_gid = m_decomposer.point_to_gid(point); + vtkhdiy::BlockID dest = proxy.out_link().target(dest_gid); + outgoing[dest].push_back(block->m_partials[i]); + } //for + + block->m_partials.clear(); + + + for (int i = 0; i < proxy.out_link().size(); ++i) + { + int dest_gid = proxy.out_link().target(i).gid; + vtkhdiy::BlockID dest = proxy.out_link().target(dest_gid); + proxy.enqueue(dest, outgoing[dest]); + //outgoing[dest].clear(); + } + + } // if + else + { + for (int i = 0; i < proxy.in_link().size(); ++i) + { + int gid = proxy.in_link().target(i).gid; + std::vector incoming_partials; + proxy.dequeue(gid, incoming_partials); + const int incoming_size = incoming_partials.size(); + // TODO: make this a std::copy + for (int j = 0; j < incoming_size; ++j) + { + block->m_partials.push_back(incoming_partials[j]); + } + } // for + + } // else + MPI_Barrier(MPI_COMM_WORLD); //HACK + } // 
operator +}; + + +template +void redistribute_detail(std::vector& partials, + MPI_Comm comm, + const int& domain_min_pixel, + const int& domain_max_pixel) +{ + typedef typename AddBlockType::Block Block; + + vtkhdiy::mpi::communicator world(comm); + vtkhdiy::DiscreteBounds global_bounds; + global_bounds.min[0] = domain_min_pixel; + global_bounds.max[0] = domain_max_pixel; + + // tells diy to use all availible threads + const int num_threads = 1; + const int num_blocks = world.size(); + const int magic_k = 2; + + vtkhdiy::Master master(world, num_threads); + + // create an assigner with one block per rank + vtkhdiy::ContiguousAssigner assigner(num_blocks, num_blocks); + AddBlockType create(master, partials); + + const int dims = 1; + vtkhdiy::RegularDecomposer decomposer(dims, global_bounds, num_blocks); + decomposer.decompose(world.rank(), assigner, create); + vtkhdiy::all_to_all(master, assigner, Redistribute(decomposer), magic_k); +} + +// +// Define a default template that cannot be instantiated +// +template +void redistribute(std::vector& partials, + MPI_Comm comm, + const int& domain_min_pixel, + const int& domain_max_pixel); +// ----------------------------- VolumePartial Specialization------------------------------------------ +template <> +void redistribute>(std::vector>& partials, + MPI_Comm comm, + const int& domain_min_pixel, + const int& domain_max_pixel) +{ + redistribute_detail>>( + partials, comm, domain_min_pixel, domain_max_pixel); +} + +template <> +void redistribute>(std::vector>& partials, + MPI_Comm comm, + const int& domain_min_pixel, + const int& domain_max_pixel) +{ + redistribute_detail>>( + partials, comm, domain_min_pixel, domain_max_pixel); +} + +// ----------------------------- AbsorpPartial Specialization------------------------------------------ +template <> +void redistribute>(std::vector>& partials, + MPI_Comm comm, + const int& domain_min_pixel, + const int& domain_max_pixel) +{ + redistribute_detail>>( + partials, comm, 
domain_min_pixel, domain_max_pixel); +} + +template <> +void redistribute>(std::vector>& partials, + MPI_Comm comm, + const int& domain_min_pixel, + const int& domain_max_pixel) +{ + redistribute_detail>>( + partials, comm, domain_min_pixel, domain_max_pixel); +} + +// ----------------------------- EmissPartial Specialization------------------------------------------ +template <> +void redistribute>(std::vector>& partials, + MPI_Comm comm, + const int& domain_min_pixel, + const int& domain_max_pixel) +{ + redistribute_detail>>( + partials, comm, domain_min_pixel, domain_max_pixel); +} + +template <> +void redistribute>(std::vector>& partials, + MPI_Comm comm, + const int& domain_min_pixel, + const int& domain_max_pixel) +{ + redistribute_detail>>( + partials, comm, domain_min_pixel, domain_max_pixel); +} + +} //namespace rover + +#endif diff --git a/vtkm/rendering/compositing/vtkm_diy_utils.h b/vtkm/rendering/compositing/vtkm_diy_utils.h new file mode 100644 index 000000000..1c6985617 --- /dev/null +++ b/vtkm/rendering/compositing/vtkm_diy_utils.h @@ -0,0 +1,49 @@ +#ifndef VTKH_DIY_UTILS_HPP +#define VTKH_DIY_UTILS_HPP + +#include +#include + +namespace vtkh +{ + +static vtkm::Bounds DIYBoundsToVTKM(const vtkmdiy::DiscreteBounds& bounds) +{ + vtkm::Bounds vtkm_bounds; + + vtkm_bounds.X.Min = bounds.min[0]; + vtkm_bounds.Y.Min = bounds.min[1]; + vtkm_bounds.Z.Min = bounds.min[2]; + + vtkm_bounds.X.Max = bounds.max[0]; + vtkm_bounds.Y.Max = bounds.max[1]; + vtkm_bounds.Z.Max = bounds.max[2]; + return vtkm_bounds; +} + +static vtkmdiy::DiscreteBounds VTKMBoundsToDIY(const vtkm::Bounds& bounds) +{ + vtkmdiy::DiscreteBounds diy_bounds(3); + + diy_bounds.min[0] = bounds.X.Min; + diy_bounds.min[1] = bounds.Y.Min; + + diy_bounds.max[0] = bounds.X.Max; + diy_bounds.max[1] = bounds.Y.Max; + + if (bounds.Z.IsNonEmpty()) + { + diy_bounds.min[2] = bounds.Z.Min; + diy_bounds.max[2] = bounds.Z.Max; + } + else + { + diy_bounds.min[2] = 0; + diy_bounds.max[2] = 0; + } + return 
diy_bounds; +} + +} //namespace vtkh + +#endif diff --git a/vtkm/rendering/testing/CMakeLists.txt b/vtkm/rendering/testing/CMakeLists.txt index 0b97eb5bf..b607c71a3 100644 --- a/vtkm/rendering/testing/CMakeLists.txt +++ b/vtkm/rendering/testing/CMakeLists.txt @@ -26,3 +26,16 @@ set(unit_tests ) vtkm_unit_tests(SOURCES ${unit_tests}) + +#add distributed tests i.e.test to run with MPI +#if MPI is enabled. +if (VTKm_ENABLE_MPI) + set(mpi_unit_tests + UnitTestImageCompositing.cxx + ) + vtkm_unit_tests( + MPI + DEVICE_SOURCES ${mpi_unit_tests} + USE_VTKM_JOB_POOL + ) +endif() diff --git a/vtkm/rendering/testing/UnitTestImageCompositing.cxx b/vtkm/rendering/testing/UnitTestImageCompositing.cxx new file mode 100644 index 000000000..f4b3fc3a0 --- /dev/null +++ b/vtkm/rendering/testing/UnitTestImageCompositing.cxx @@ -0,0 +1,199 @@ +//============================================================================ +// Copyright (c) Kitware, Inc. +// All rights reserved. +// See LICENSE.txt for details. +// +// This software is distributed WITHOUT ANY WARRANTY; without even +// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR +// PURPOSE. See the above copyright notice for more information. 
+//============================================================================ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +namespace +{ + +template +T* GetVTKMPointer(vtkm::cont::ArrayHandle& handle) +{ + return handle.WritePortal().GetArray(); +} + + +vtkm::cont::DataSet ReadDS(int rank) +{ + + std::string vtkFile; + + vtkm::io::VTKDataSetReader reader(vtkFile); +} + +vtkm::rendering::compositing::Image ConstImage(const std::size_t& width, + const std::size_t& height, + const vtkm::Vec4f& rgba, + const vtkm::FloatDefault& depth) +{ + auto numPix = width * height; + std::vector rgbaVals(numPix * 4); + std::vector depthVals(numPix, depth); + + for (std::size_t i = 0; i < numPix; i++) + { + rgbaVals[i * 4 + 0] = rgba[0]; + rgbaVals[i * 4 + 1] = rgba[1]; + rgbaVals[i * 4 + 2] = rgba[2]; + rgbaVals[i * 4 + 3] = rgba[3]; + } + + vtkm::rendering::compositing::Image img(vtkm::Bounds(0, width, 0, height, 0, 1)); + img.Init(rgbaVals.data(), depthVals.data(), width, height); + + return img; +} + +void TestImageComposite() +{ + auto comm = vtkm::cont::EnvironmentTracker::GetCommunicator(); + + std::size_t width = 4, height = 4; + + //res is the background, initially black. 
+ auto img0 = ConstImage(width, height, { 1, 0, 0, 1 }, 1.0); + + auto img1 = ConstImage(width, height, { 0, 1, 1, .5 }, 0.5); + + vtkm::rendering::compositing::Compositor compositor; + + compositor.SetCompositeMode(vtkm::rendering::compositing::Compositor::Z_BUFFER_SURFACE); + vtkm::rendering::compositing::Image img; + + if (comm.rank() == 0) + img = ConstImage(width, height, { 1, 0, 0, 1 }, 1.0); + else + img = ConstImage(width, height, { 0, 1, 1, .5 }, 0.5); + + compositor.AddImage(img.m_pixels.data(), img.m_depths.data(), width, height); + + auto res = compositor.Composite(); + + //vtkm::rendering::compositing::ImageCompositor imgCompositor; + //compositor.ZBufferComposite(res, img); + //compositor.Blend(res, img); + + if (comm.rank() == 0) + { + for (int i = 0; i < width * height; i++) + { + std::cout << i << ": "; + std::cout << (int)res.m_pixels[i * 4 + 0] << " "; + std::cout << (int)res.m_pixels[i * 4 + 1] << " "; + std::cout << (int)res.m_pixels[i * 4 + 2] << " "; + std::cout << (int)res.m_pixels[i * 4 + 3] << " "; + std::cout << res.m_depths[i] << std::endl; + } + } +} + +void TestRenderComposite() +{ + using vtkm::rendering::CanvasRayTracer; + using vtkm::rendering::MapperRayTracer; + using vtkm::rendering::MapperVolume; + using vtkm::rendering::MapperWireframer; + + auto comm = vtkm::cont::EnvironmentTracker::GetCommunicator(); + + int numBlocks = comm.size() * 1; + int rank = comm.rank(); + + std::string fieldName = "tangle"; + std::string fname = ""; + if (comm.rank() == 0) + fname = "/home/dpn/tangle0.vtk"; + else + fname = "/home/dpn/tangle1.vtk"; + + vtkm::io::VTKDataSetReader reader(fname); + auto ds = reader.ReadDataSet(); + ds.PrintSummary(std::cout); + + + /* + vtkm::source::Tangle tangle; + tangle.SetPointDimensions({ 50, 50, 50 }); + vtkm::cont::DataSet ds = tangle.Execute(); + */ + + //auto ds = CreateTestData(rank, numBlocks, 32); + //auto fieldName = "point_data_Float32"; + + /* + vtkm::rendering::testing::RenderTestOptions options; + 
options.Mapper = vtkm::rendering::testing::MapperType::RayTracer; + options.AllowAnyDevice = false; + options.ColorTable = vtkm::cont::ColorTable::Preset::Inferno; + vtkm::rendering::testing::RenderTest(ds, "point_data_Float32", "rendering/raytracer/regular3D.png", options); + */ + + vtkm::rendering::Camera camera; + camera.SetLookAt(vtkm::Vec3f_32(0.5, 0.5, 0.5)); + camera.SetLookAt(vtkm::Vec3f_32(1.0, 0.5, 0.5)); + camera.SetViewUp(vtkm::make_Vec(0.f, 1.f, 0.f)); + camera.SetClippingRange(1.f, 10.f); + camera.SetFieldOfView(60.f); + camera.SetPosition(vtkm::Vec3f_32(1.5, 1.5, 1.5)); + camera.SetPosition(vtkm::Vec3f_32(3, 3, 3)); + vtkm::cont::ColorTable colorTable("inferno"); + + // Background color: + vtkm::rendering::Color bg(0.2f, 0.2f, 0.2f, 1.0f); + vtkm::rendering::Actor actor( + ds.GetCellSet(), ds.GetCoordinateSystem(), ds.GetField(fieldName), colorTable); + vtkm::rendering::Scene scene; + scene.AddActor(actor); + int width = 512, height = 512; + CanvasRayTracer canvas(width, height); + + vtkm::rendering::View3D view(scene, MapperVolume(), canvas, camera, bg); + view.Paint(); + + if (comm.rank() == 0) + view.SaveAs("volume0.png"); + else + view.SaveAs("volume1.png"); + + auto colors = &GetVTKMPointer(canvas.GetColorBuffer())[0][0]; + auto depths = GetVTKMPointer(canvas.GetDepthBuffer()); + + vtkm::rendering::compositing::Compositor compositor; + compositor.AddImage(colors, depths, width, height); + auto res = compositor.Composite(); + res.Save("RESULT.png", { "" }); +} + +void RenderTests() +{ + // TestImageComposite(); + TestRenderComposite(); +} + +} //namespace + +int UnitTestImageCompositing(int argc, char* argv[]) +{ + return vtkm::cont::testing::Testing::Run(RenderTests, argc, argv); +} diff --git a/vtkm/rendering/testing/t_vtkm_test_utils.h b/vtkm/rendering/testing/t_vtkm_test_utils.h new file mode 100644 index 000000000..c4ccd599e --- /dev/null +++ b/vtkm/rendering/testing/t_vtkm_test_utils.h @@ -0,0 +1,773 @@ +#ifndef t_test_utils_h +#define 
t_test_utils_h + +#include +#include +#include +#include +#include +#include +#include +#include +//#include + +#define BASE_SIZE 32 +typedef vtkm::cont::ArrayHandleUniformPointCoordinates UniformCoords; + +struct SpatialDivision +{ + int m_mins[3]; + int m_maxs[3]; + + SpatialDivision() + : m_mins{ 0, 0, 0 } + , m_maxs{ 1, 1, 1 } + { + } + + bool CanSplit(int dim) { return m_maxs[dim] - m_mins[dim] + 1 > 1; } + + SpatialDivision Split(int dim) + { + SpatialDivision r_split; + r_split = *this; + assert(CanSplit(dim)); + int size = m_maxs[dim] - m_mins[dim] + 1; + int left_offset = size / 2; + + //shrink the left side + m_maxs[dim] = m_mins[dim] + left_offset - 1; + //shrink the right side + r_split.m_mins[dim] = m_maxs[dim] + 1; + return r_split; + } +}; + +SpatialDivision GetBlock(int block, int num_blocks, SpatialDivision total_size) +{ + + std::vector divs; + divs.push_back(total_size); + int avail = num_blocks - 1; + int current_dim = 0; + int missed_splits = 0; + const int num_dims = 3; + while (avail > 0) + { + const int current_size = divs.size(); + int temp_avail = avail; + for (int i = 0; i < current_size; ++i) + { + if (avail == 0) + break; + if (!divs[i].CanSplit(current_dim)) + { + continue; + } + divs.push_back(divs[i].Split(current_dim)); + --avail; + } + if (temp_avail == avail) + { + // dims were too small to make any spit + missed_splits++; + if (missed_splits == 3) + { + // we tried all three dims and could + // not make a split. + for (int i = 0; i < avail; ++i) + { + SpatialDivision empty; + empty.m_maxs[0] = 0; + empty.m_maxs[1] = 0; + empty.m_maxs[2] = 0; + divs.push_back(empty); + } + if (block == 0) + { + std::cerr << "** Warning **: data set size is too small to" + << " divide between " << num_blocks << " blocks. 
" + << " Adding " << avail << " empty data sets\n"; + } + + avail = 0; + } + } + else + { + missed_splits = 0; + } + + current_dim = (current_dim + 1) % num_dims; + } + + return divs.at(block); +} + +template +vtkm::cont::Field CreateCellScalarField(int size, const char* fieldName) +{ + vtkm::cont::ArrayHandle data; + data.Allocate(size); + + for (int i = 0; i < size; ++i) + { + FieldType val = i / vtkm::Float32(size); + data.WritePortal().Set(i, val); + } + + + vtkm::cont::Field field(fieldName, vtkm::cont::Field::Association::Cells, data); + return field; +} + +vtkm::cont::Field CreateGhostScalarField(vtkm::Id3 dims) +{ + vtkm::Int32 size = dims[0] * dims[1] * dims[2]; + vtkm::cont::ArrayHandle data; + data.Allocate(size); + + for (int z = 0; z < dims[2]; ++z) + for (int y = 0; y < dims[1]; ++y) + for (int x = 0; x < dims[0]; ++x) + { + vtkm::UInt8 flag = 0; + if (x < 1 || x > dims[0] - 2) + flag = 1; + if (y < 1 || y > dims[1] - 2) + flag = 1; + if (z < 1 || z > dims[2] - 2) + flag = 1; + vtkm::Id index = z * dims[0] * dims[1] + y * dims[0] + x; + data.WritePortal().Set(index, flag); + } + + vtkm::cont::Field field("ghosts", vtkm::cont::Field::Association::Cells, data); + return field; +} + +template +vtkm::cont::Field CreatePointScalarField(UniformCoords coords, const char* fieldName) + +{ + const int size = coords.GetNumberOfValues(); + vtkm::cont::ArrayHandle data; + data.Allocate(size); + auto portal = coords.ReadPortal(); + for (int i = 0; i < size; ++i) + { + vtkm::Vec point = portal.Get(i); + + FieldType val = vtkm::Magnitude(point) + 1.f; + data.WritePortal().Set(i, val); + } + + vtkm::cont::Field field(fieldName, vtkm::cont::Field::Association::Points, data); + return field; +} + +template +vtkm::cont::Field CreatePointVecField(int size, const char* fieldName) +{ + vtkm::cont::ArrayHandle> data; + data.Allocate(size); + + for (int i = 0; i < size; ++i) + { + FieldType val = i / FieldType(size); + + vtkm::Vec vec(val, -val, val); + + 
data.WritePortal().Set(i, vec); + } + + vtkm::cont::Field field(fieldName, vtkm::cont::Field::Association::Points, data); + return field; +} + +vtkm::cont::DataSet CreateTestData(int block, int num_blocks, int base_size) +{ + SpatialDivision mesh_size; + + mesh_size.m_mins[0] = 0; + mesh_size.m_mins[1] = 0; + mesh_size.m_mins[2] = 0; + + mesh_size.m_maxs[0] = num_blocks * base_size - 1; + mesh_size.m_maxs[1] = num_blocks * base_size - 1; + mesh_size.m_maxs[2] = num_blocks * base_size - 1; + + SpatialDivision local_block = GetBlock(block, num_blocks, mesh_size); + + vtkm::Vec origin; + origin[0] = local_block.m_mins[0]; + origin[1] = local_block.m_mins[1]; + origin[2] = local_block.m_mins[2]; + + vtkm::Vec spacing(1.f, 1.f, 1.f); + + vtkm::Id3 point_dims; + point_dims[0] = local_block.m_maxs[0] - local_block.m_mins[0] + 2; + point_dims[1] = local_block.m_maxs[1] - local_block.m_mins[1] + 2; + point_dims[2] = local_block.m_maxs[2] - local_block.m_mins[2] + 2; + + + vtkm::Id3 cell_dims; + cell_dims[0] = point_dims[0] - 1; + cell_dims[1] = point_dims[1] - 1; + cell_dims[2] = point_dims[2] - 1; + + vtkm::cont::DataSet data_set; + + UniformCoords point_handle(point_dims, origin, spacing); + + vtkm::cont::CoordinateSystem coords("coords", point_handle); + data_set.AddCoordinateSystem(coords); + + vtkm::cont::CellSetStructured<3> cell_set; + cell_set.SetPointDimensions(point_dims); + data_set.SetCellSet(cell_set); + + int num_points = point_dims[0] * point_dims[1] * point_dims[2]; + int num_cells = cell_dims[0] * cell_dims[1] * cell_dims[2]; + + data_set.AddField(CreatePointScalarField(point_handle, "point_data_Float32")); + data_set.AddField(CreatePointVecField(num_points, "vector_data_Float32")); + data_set.AddField(CreateCellScalarField(num_cells, "cell_data_Float32")); + data_set.AddField(CreatePointScalarField(point_handle, "point_data_Float64")); + data_set.AddField(CreatePointVecField(num_points, "vector_data_Float64")); + 
data_set.AddField(CreateCellScalarField(num_cells, "cell_data_Float64")); + data_set.AddField(CreateGhostScalarField(cell_dims)); + return data_set; +} + +vtkm::cont::DataSet CreateTestDataRectilinear(int block, int num_blocks, int base_size) +{ + SpatialDivision mesh_size; + + mesh_size.m_mins[0] = 0; + mesh_size.m_mins[1] = 0; + mesh_size.m_mins[2] = 0; + + mesh_size.m_maxs[0] = num_blocks * base_size - 1; + mesh_size.m_maxs[1] = num_blocks * base_size - 1; + mesh_size.m_maxs[2] = num_blocks * base_size - 1; + + SpatialDivision local_block = GetBlock(block, num_blocks, mesh_size); + + vtkm::Vec origin; + origin[0] = local_block.m_mins[0]; + origin[1] = local_block.m_mins[1]; + origin[2] = local_block.m_mins[2]; + + vtkm::Vec spacing(1.f, 1.f, 1.f); + + vtkm::Id3 point_dims; + point_dims[0] = local_block.m_maxs[0] - local_block.m_mins[0] + 2; + point_dims[1] = local_block.m_maxs[1] - local_block.m_mins[1] + 2; + point_dims[2] = local_block.m_maxs[2] - local_block.m_mins[2] + 2; + + + vtkm::Id3 cell_dims; + cell_dims[0] = point_dims[0] - 1; + cell_dims[1] = point_dims[1] - 1; + cell_dims[2] = point_dims[2] - 1; + + std::vector xvals, yvals, zvals; + xvals.resize((size_t)point_dims[0]); + xvals[0] = static_cast(local_block.m_mins[0]); + for (size_t i = 1; i < (size_t)point_dims[0]; i++) + xvals[i] = xvals[i - 1] + spacing[0]; + + yvals.resize((size_t)point_dims[1]); + yvals[0] = static_cast(local_block.m_mins[1]); + for (size_t i = 1; i < (size_t)point_dims[1]; i++) + yvals[i] = yvals[i - 1] + spacing[1]; + + zvals.resize((size_t)point_dims[2]); + zvals[0] = static_cast(local_block.m_mins[2]); + for (size_t i = 1; i < (size_t)point_dims[2]; i++) + zvals[i] = zvals[i - 1] + spacing[2]; + + vtkm::cont::DataSetBuilderRectilinear dataSetBuilder; + vtkm::cont::DataSet data_set = dataSetBuilder.Create(xvals, yvals, zvals); + + int num_points = point_dims[0] * point_dims[1] * point_dims[2]; + + data_set.AddField(CreatePointVecField(num_points, "vector_data_Float32")); + 
data_set.AddField(CreatePointVecField(num_points, "vector_data_Float64")); + + return data_set; +} + +vtkm::cont::DataSet CreateTestDataPoints(int num_points) +{ + std::vector x_vals; + std::vector y_vals; + std::vector z_vals; + std::vector shapes; + std::vector num_indices; + std::vector conn; + std::vector field; + + x_vals.resize(num_points); + y_vals.resize(num_points); + z_vals.resize(num_points); + shapes.resize(num_points); + conn.resize(num_points); + num_indices.resize(num_points); + field.resize(num_points); + + std::linear_congruential_engine rgen{ 0 }; + std::uniform_real_distribution dist{ -10., 10. }; + + for (int i = 0; i < num_points; ++i) + { + x_vals[i] = dist(rgen); + y_vals[i] = dist(rgen); + z_vals[i] = dist(rgen); + field[i] = dist(rgen); + shapes[i] = vtkm::CELL_SHAPE_VERTEX; + num_indices[i] = 1; + conn[i] = i; + } + vtkm::cont::DataSetBuilderExplicit dataSetBuilder; + vtkm::cont::DataSet data_set = + dataSetBuilder.Create(x_vals, y_vals, z_vals, shapes, num_indices, conn); + vtkm::cont::Field vfield = vtkm::cont::make_Field( + "point_data_Float64", vtkm::cont::Field::Association::Points, field, vtkm::CopyFlag::On); + data_set.AddField(vfield); + return data_set; +} + +//----------------------------------------------------------------------------- +//Create VTK-m Data Sets +//----------------------------------------------------------------------------- + +//Make a 2Duniform dataset. +inline vtkm::cont::DataSet Make2DUniformDataSet0() +{ + vtkm::cont::DataSetBuilderUniform dsb; + constexpr vtkm::Id2 dimensions(3, 2); + vtkm::cont::DataSet dataSet = dsb.Create(dimensions); + + constexpr vtkm::Id nVerts = 6; + constexpr vtkm::Float32 var[nVerts] = { 10.1f, 20.1f, 30.1f, 40.1f, 50.1f, 60.1f }; + + dataSet.AddPointField("pointvar", var, nVerts); + + constexpr vtkm::Float32 cellvar[2] = { 100.1f, 200.1f }; + dataSet.AddCellField("cellvar", cellvar, 2); + + return dataSet; +} + +//Make a 2D rectilinear dataset. 
+inline vtkm::cont::DataSet Make2DRectilinearDataSet0() +{ + vtkm::cont::DataSetBuilderRectilinear dsb; + std::vector X(3), Y(2); + + X[0] = 0.0f; + X[1] = 1.0f; + X[2] = 2.0f; + Y[0] = 0.0f; + Y[1] = 1.0f; + + vtkm::cont::DataSet dataSet = dsb.Create(X, Y); + + const vtkm::Id nVerts = 6; + vtkm::Float32 var[nVerts]; + for (int i = 0; i < nVerts; i++) + var[i] = (vtkm::Float32)i; + dataSet.AddPointField("pointvar", var, nVerts); + + const vtkm::Id nCells = 2; + vtkm::Float32 cellvar[nCells]; + for (int i = 0; i < nCells; i++) + cellvar[i] = (vtkm::Float32)i; + dataSet.AddCellField("cellvar", cellvar, nCells); + + return dataSet; +} + +inline vtkm::cont::DataSet Make3DExplicitDataSet5() +{ + vtkm::cont::DataSet dataSet; + + const int nVerts = 11; + using CoordType = vtkm::Vec3f_32; + CoordType coordinates[nVerts] = { + CoordType(0, 0, 0), //0 + CoordType(1, 0, 0), //1 + CoordType(1, 0, 1), //2 + CoordType(0, 0, 1), //3 + CoordType(0, 1, 0), //4 + CoordType(1, 1, 0), //5 + CoordType(1, 1, 1), //6 + CoordType(0, 1, 1), //7 + CoordType(2, 0.5, 0.5), //8 + CoordType(0, 2, 0), //9 + CoordType(1, 2, 0) //10 + }; + vtkm::Float32 vars[nVerts] = { 10.1f, 20.1f, 30.2f, 40.2f, 50.3f, 60.2f, + 70.2f, 80.3f, 90.f, 10.f, 11.f }; + + dataSet.AddCoordinateSystem( + vtkm::cont::make_CoordinateSystem("coordinates", coordinates, nVerts, vtkm::CopyFlag::On)); + + //Set point scalar + dataSet.AddField(make_Field( + "pointvar", vtkm::cont::Field::Association::Points, vars, nVerts, vtkm::CopyFlag::On)); + + //Set cell scalar + const int nCells = 4; + vtkm::Float32 cellvar[nCells] = { 100.1f, 110.f, 120.2f, 130.5f }; + dataSet.AddField(make_Field( + "cellvar", vtkm::cont::Field::Association::Cells, cellvar, nCells, vtkm::CopyFlag::On)); + + vtkm::cont::CellSetExplicit<> cellSet; + vtkm::Vec ids; + + cellSet.PrepareToAddCells(nCells, 23); + + ids[0] = 0; + ids[1] = 1; + ids[2] = 5; + ids[3] = 4; + ids[4] = 3; + ids[5] = 2; + ids[6] = 6; + ids[7] = 7; + 
cellSet.AddCell(vtkm::CELL_SHAPE_HEXAHEDRON, 8, ids); + + ids[0] = 1; + ids[1] = 5; + ids[2] = 6; + ids[3] = 2; + ids[4] = 8; + cellSet.AddCell(vtkm::CELL_SHAPE_PYRAMID, 5, ids); + + ids[0] = 5; + ids[1] = 8; + ids[2] = 10; + ids[3] = 6; + cellSet.AddCell(vtkm::CELL_SHAPE_TETRA, 4, ids); + + ids[0] = 4; + ids[1] = 7; + ids[2] = 9; + ids[3] = 5; + ids[4] = 6; + ids[5] = 10; + cellSet.AddCell(vtkm::CELL_SHAPE_WEDGE, 6, ids); + + cellSet.CompleteAddingCells(nVerts); + + //todo this need to be a reference/shared_ptr style class + dataSet.SetCellSet(cellSet); + + return dataSet; +} + +inline vtkm::cont::DataSet Make3DUniformDataSet0() +{ + vtkm::cont::DataSetBuilderUniform dsb; + constexpr vtkm::Id3 dimensions(3, 2, 3); + vtkm::cont::DataSet dataSet = dsb.Create(dimensions); + + constexpr int nVerts = 18; + constexpr vtkm::Float32 vars[nVerts] = { 10.1f, 20.1f, 30.1f, 40.1f, 50.2f, 60.2f, + 70.2f, 80.2f, 90.3f, 100.3f, 110.3f, 120.3f, + 130.4f, 140.4f, 150.4f, 160.4f, 170.5f, 180.5f }; + + //Set point and cell scalar + dataSet.AddPointField("pointvar", vars, nVerts); + + constexpr vtkm::Float32 cellvar[4] = { 100.1f, 100.2f, 100.3f, 100.4f }; + dataSet.AddCellField("cellvar", cellvar, 4); + + return dataSet; +} + +inline vtkm::cont::DataSet Make3DExplicitDataSet2() +{ + vtkm::cont::DataSet dataSet; + + const int nVerts = 8; + using CoordType = vtkm::Vec3f_32; + CoordType coordinates[nVerts] = { + CoordType(0, 0, 0), // 0 + CoordType(1, 0, 0), // 1 + CoordType(1, 0, 1), // 2 + CoordType(0, 0, 1), // 3 + CoordType(0, 1, 0), // 4 + CoordType(1, 1, 0), // 5 + CoordType(1, 1, 1), // 6 + CoordType(0, 1, 1) // 7 + }; + vtkm::Float32 vars[nVerts] = { 10.1f, 20.1f, 30.2f, 40.2f, 50.3f, 60.2f, 70.2f, 80.3f }; + + dataSet.AddCoordinateSystem( + vtkm::cont::make_CoordinateSystem("coordinates", coordinates, nVerts, vtkm::CopyFlag::On)); + + //Set point scalar + dataSet.AddField(make_Field( + "pointvar", vtkm::cont::Field::Association::Points, vars, nVerts, vtkm::CopyFlag::On)); + + 
//Set cell scalar + vtkm::Float32 cellvar[2] = { 100.1f }; + dataSet.AddField( + make_Field("cellvar", vtkm::cont::Field::Association::Cells, cellvar, 1, vtkm::CopyFlag::On)); + + vtkm::cont::CellSetExplicit<> cellSet; + vtkm::Vec ids; + ids[0] = 0; + ids[1] = 1; + ids[2] = 2; + ids[3] = 3; + ids[4] = 4; + ids[5] = 5; + ids[6] = 6; + ids[7] = 7; + + cellSet.PrepareToAddCells(1, 8); + cellSet.AddCell(vtkm::CELL_SHAPE_HEXAHEDRON, 8, ids); + cellSet.CompleteAddingCells(nVerts); + + //todo this need to be a reference/shared_ptr style class + dataSet.SetCellSet(cellSet); + + return dataSet; +} + +#if 0 + +namespace detail +{ + +template +struct TestValueImpl; +} //namespace detail + +// Many tests involve getting and setting values in some index-based structure +// (like an array). These tests also often involve trying many types. The +// overloaded TestValue function returns some unique value for an index for a +// given type. Different types might give different values. +// +template +static inline T TestValue(vtkm::Id index, T) +{ + return detail::TestValueImpl()(index); +} + +namespace detail +{ + +template +struct TestValueImpl +{ + T DoIt(vtkm::Id index, vtkm::TypeTraitsIntegerTag) const + { + constexpr bool larger_than_2bytes = sizeof(T) > 2; + if (larger_than_2bytes) + { + return T(index * 100); + } + else + { + return T(index + 100); + } + } + + T DoIt(vtkm::Id index, vtkm::TypeTraitsRealTag) const + { + return T(0.01f * static_cast(index) + 1.001f); + } + + T operator()(vtkm::Id index) const + { + return this->DoIt(index, typename vtkm::TypeTraits::NumericTag()); + } +}; + +template +struct TestValueImpl> +{ + vtkm::Vec operator()(vtkm::Id index) const + { + vtkm::Vec value; + for (vtkm::IdComponent i = 0; i < N; i++) + { + value[i] = TestValue(index * N + i, T()); + } + return value; + } +}; + +template +struct TestValueImpl> +{ + vtkm::Pair operator()(vtkm::Id index) const + { + return vtkm::Pair(TestValue(2 * index, U()), TestValue(2 * index + 1, V())); + } 
+}; + +template +struct TestValueImpl> +{ + vtkm::Matrix operator()(vtkm::Id index) const + { + vtkm::Matrix value; + vtkm::Id runningIndex = index * NumRow * NumCol; + for (vtkm::IdComponent row = 0; row < NumRow; ++row) + { + for (vtkm::IdComponent col = 0; col < NumCol; ++col) + { + value(row, col) = TestValue(runningIndex, T()); + ++runningIndex; + } + } + return value; + } +}; + +template <> +struct TestValueImpl +{ + std::string operator()(vtkm::Id index) const + { + std::stringstream stream; + stream << index; + return stream.str(); + } +}; + +} //namespace detail + +// Verifies that the contents of the given array portal match the values +// returned by vtkm::testing::TestValue. +template +static inline void CheckPortal(const PortalType& portal) +{ + using ValueType = typename PortalType::ValueType; + for (vtkm::Id index = 0; index < portal.GetNumberOfValues(); index++) + { + ValueType expectedValue = TestValue(index, ValueType()); + ValueType foundValue = portal.Get(index); + if (!test_equal(expectedValue, foundValue)) + { + ASCENT_ERROR("Got unexpected value in array. Expected: " << expectedValue + << ", Found: " << foundValue << "\n"); + } + } +} + +/// Sets all the values in a given array portal to be the values returned +/// by vtkm::testing::TestValue. The ArrayPortal must be allocated first. 
+template +static inline void SetPortal(const PortalType& portal) +{ + using ValueType = typename PortalType::ValueType; + + for (vtkm::Id index = 0; index < portal.GetNumberOfValues(); index++) + { + portal.Set(index, TestValue(index, ValueType())); + } +} + +#endif + +inline vtkm::cont::DataSet Make3DExplicitDataSetCowNose() +{ + // prepare data array + const int nVerts = 17; + using CoordType = vtkm::Vec3f_64; + CoordType coordinates[nVerts] = { + CoordType(0.0480879, 0.151874, 0.107334), CoordType(0.0293568, 0.245532, 0.125337), + CoordType(0.0224398, 0.246495, 0.1351), CoordType(0.0180085, 0.20436, 0.145316), + CoordType(0.0307091, 0.152142, 0.0539249), CoordType(0.0270341, 0.242992, 0.107567), + CoordType(0.000684071, 0.00272505, 0.175648), CoordType(0.00946217, 0.077227, 0.187097), + CoordType(-0.000168991, 0.0692243, 0.200755), CoordType(-0.000129414, 0.00247137, 0.176561), + CoordType(0.0174172, 0.137124, 0.124553), CoordType(0.00325994, 0.0797155, 0.184912), + CoordType(0.00191765, 0.00589327, 0.16608), CoordType(0.0174716, 0.0501928, 0.0930275), + CoordType(0.0242103, 0.250062, 0.126256), CoordType(0.0108188, 0.152774, 0.167914), + CoordType(5.41687e-05, 0.00137834, 0.175119) + }; + const int connectivitySize = 57; + vtkm::Id pointId[connectivitySize] = { 0, 1, 3, 2, 3, 1, 4, 5, 0, 1, 0, 5, 7, 8, 6, + 9, 6, 8, 0, 10, 7, 11, 7, 10, 0, 6, 13, 12, 13, 6, + 1, 5, 14, 1, 14, 2, 0, 3, 15, 0, 13, 4, 6, 16, 12, + 6, 9, 16, 7, 11, 8, 0, 15, 10, 7, 6, 0 }; + + // create DataSet + vtkm::cont::DataSet dataSet; + dataSet.AddCoordinateSystem( + vtkm::cont::make_CoordinateSystem("coordinates", coordinates, nVerts, vtkm::CopyFlag::On)); + + vtkm::cont::ArrayHandle connectivity; + connectivity.Allocate(connectivitySize); + + for (vtkm::Id i = 0; i < connectivitySize; ++i) + { + connectivity.WritePortal().Set(i, pointId[i]); + } + vtkm::cont::CellSetSingleType<> cellSet; + cellSet.Fill(nVerts, vtkm::CELL_SHAPE_TRIANGLE, 3, connectivity); + dataSet.SetCellSet(cellSet); + + 
std::vector pointvar(nVerts); + std::iota(pointvar.begin(), pointvar.end(), 15.f); + std::vector cellvar(connectivitySize / 3); + std::iota(cellvar.begin(), cellvar.end(), 132.f); + + vtkm::cont::ArrayHandle pointvec; + pointvec.Allocate(nVerts); + SetPortal(pointvec.WritePortal()); + + vtkm::cont::ArrayHandle cellvec; + cellvec.Allocate(connectivitySize / 3); + SetPortal(cellvec.WritePortal()); + + dataSet.AddPointField("pointvar", pointvar); + dataSet.AddCellField("cellvar", cellvar); + dataSet.AddPointField("point_vectors", pointvec); + dataSet.AddCellField("cell_vectors", cellvec); + + return dataSet; +} + +inline vtkm::cont::DataSet Make3DRectilinearDataSet0() +{ + vtkm::cont::DataSetBuilderRectilinear dsb; + std::vector X(3), Y(2), Z(3); + + X[0] = 0.0f; + X[1] = 1.0f; + X[2] = 2.0f; + Y[0] = 0.0f; + Y[1] = 1.0f; + Z[0] = 0.0f; + Z[1] = 1.0f; + Z[2] = 2.0f; + + vtkm::cont::DataSet dataSet = dsb.Create(X, Y, Z); + + const vtkm::Id nVerts = 18; + vtkm::Float32 var[nVerts]; + for (int i = 0; i < nVerts; i++) + var[i] = (vtkm::Float32)i; + dataSet.AddPointField("pointvar", var, nVerts); + + const vtkm::Id nCells = 4; + vtkm::Float32 cellvar[nCells]; + for (int i = 0; i < nCells; i++) + cellvar[i] = (vtkm::Float32)i; + dataSet.AddCellField("cellvar", cellvar, nCells); + + return dataSet; +} + + +#endif From 00b078821ff34a15ff15a11b8c24308116616da2 Mon Sep 17 00:00:00 2001 From: Dave Pugmire Date: Thu, 19 Jan 2023 22:04:05 -0500 Subject: [PATCH 02/11] A few tweaks here and there.... More to come.. 
--- vtkm/rendering/CMakeLists.txt | 1 + vtkm/rendering/compositing/Compositor.cxx | 14 ++-- .../compositing/DirectSendCompositor.cxx | 72 +++++++++++++------ .../compositing/DirectSendCompositor.h | 40 ++++++++--- 4 files changed, 86 insertions(+), 41 deletions(-) diff --git a/vtkm/rendering/CMakeLists.txt b/vtkm/rendering/CMakeLists.txt index 8389c2caf..302b6542d 100644 --- a/vtkm/rendering/CMakeLists.txt +++ b/vtkm/rendering/CMakeLists.txt @@ -92,6 +92,7 @@ set(sources raytracing/TriangleExtractor.cxx compositing/Compositor.cxx + compositing/DirectSendCompositor.cxx compositing/Image.cxx compositing/PNGEncoder.cxx compositing/RadixKCompositor.cxx diff --git a/vtkm/rendering/compositing/Compositor.cxx b/vtkm/rendering/compositing/Compositor.cxx index 3fd314950..94e365787 100644 --- a/vtkm/rendering/compositing/Compositor.cxx +++ b/vtkm/rendering/compositing/Compositor.cxx @@ -17,9 +17,9 @@ #ifdef VTKM_ENABLE_MPI #include -#include -//#include +#include #include +#include #endif namespace vtkm @@ -189,14 +189,10 @@ void Compositor::CompositeVisOrder() { #ifdef VTKM_ENABLE_MPI - /* - vtkhdiy::mpi::communicator diy_comm; - diy_comm = vtkhdiy::mpi::communicator(MPI_Comm_f2c(GetMPICommHandle())); - + auto comm = vtkm::cont::EnvironmentTracker::GetCommunicator(); assert(m_images.size() != 0); - DirectSendCompositor compositor; - compositor.CompositeVolume(diy_comm, this->m_images); - */ + vtkm::rendering::compositing::DirectSendCompositor compositor; + compositor.CompositeVolume(comm, this->m_images); #else vtkm::rendering::compositing::ImageCompositor compositor; compositor.OrderedComposite(m_images); diff --git a/vtkm/rendering/compositing/DirectSendCompositor.cxx b/vtkm/rendering/compositing/DirectSendCompositor.cxx index 56d5e70da..f6638d41a 100644 --- a/vtkm/rendering/compositing/DirectSendCompositor.cxx +++ b/vtkm/rendering/compositing/DirectSendCompositor.cxx @@ -1,5 +1,21 @@ -#include -#include 
+//============================================================================ +// Copyright (c) Kitware, Inc. +// All rights reserved. +// See LICENSE.txt for details. +// +// This software is distributed WITHOUT ANY WARRANTY; without even +// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR +// PURPOSE. See the above copyright notice for more information. +//============================================================================ + +#include +#include + +#include +#include +#include + +/* #include #include #include @@ -9,20 +25,27 @@ #include #include #include +*/ -namespace vtkh +namespace vtkm +{ +namespace rendering +{ +namespace compositing { +namespace internal +{ struct Redistribute { - typedef vtkhdiy::RegularDecomposer Decomposer; - const vtkhdiy::RegularDecomposer& m_decomposer; + typedef vtkmdiy::RegularDecomposer Decomposer; + const vtkmdiy::RegularDecomposer& m_decomposer; Redistribute(const Decomposer& decomposer) : m_decomposer(decomposer) { } - void operator()(void* v_block, const vtkhdiy::ReduceProxy& proxy) const + void operator()(void* v_block, const vtkmdiy::ReduceProxy& proxy) const { MultiImageBlock* block = static_cast(v_block); // @@ -34,15 +57,15 @@ struct Redistribute const int local_images = block->m_images.size(); if (proxy.in_link().size() == 0) { - std::map> outgoing; + std::map> outgoing; for (int i = 0; i < world_size; ++i) { - vtkhdiy::DiscreteBounds sub_image_bounds; + vtkmdiy::DiscreteBounds sub_image_bounds(3); m_decomposer.fill_bounds(sub_image_bounds, i); - vtkm::Bounds vtkm_sub_bounds = DIYBoundsToVTKM(sub_image_bounds); + vtkm::Bounds vtkm_sub_bounds = vtkh::DIYBoundsToVTKM(sub_image_bounds); - vtkhdiy::BlockID dest = proxy.out_link().target(i); + vtkmdiy::BlockID dest = proxy.out_link().target(i); outgoing[dest].resize(local_images); for (int img = 0; img < local_images; ++img) @@ -51,7 +74,7 @@ struct Redistribute } } //for - typename std::map>::iterator it; + typename std::map>::iterator it; for 
(it = outgoing.begin(); it != outgoing.end(); ++it) { proxy.enqueue(it->first, it->second); @@ -108,14 +131,16 @@ struct Redistribute } // operator }; +} //namespace internal + DirectSendCompositor::DirectSendCompositor() {} DirectSendCompositor::~DirectSendCompositor() {} -void DirectSendCompositor::CompositeVolume(vtkhdiy::mpi::communicator& diy_comm, +void DirectSendCompositor::CompositeVolume(vtkmdiy::mpi::communicator& diy_comm, std::vector& images) { - vtkhdiy::DiscreteBounds global_bounds = VTKMBoundsToDIY(images.at(0).m_orig_bounds); + vtkmdiy::DiscreteBounds global_bounds = vtkh::VTKMBoundsToDIY(images.at(0).m_orig_bounds); const int num_threads = 1; const int num_blocks = diy_comm.size(); @@ -126,38 +151,39 @@ void DirectSendCompositor::CompositeVolume(vtkhdiy::mpi::communicator& diy_comm, // so we isolate them within separate blocks // { - vtkhdiy::Master master(diy_comm, num_threads, -1, 0, [](void* b) { + vtkmdiy::Master master(diy_comm, num_threads, -1, 0, [](void* b) { ImageBlock* block = reinterpret_cast*>(b); delete block; }); // create an assigner with one block per rank - vtkhdiy::ContiguousAssigner assigner(num_blocks, num_blocks); + vtkmdiy::ContiguousAssigner assigner(num_blocks, num_blocks); AddMultiImageBlock create(master, images, sub_image); const int dims = 2; - vtkhdiy::RegularDecomposer decomposer(dims, global_bounds, num_blocks); + vtkmdiy::RegularDecomposer decomposer(dims, global_bounds, num_blocks); decomposer.decompose(diy_comm.rank(), assigner, create); - vtkhdiy::all_to_all(master, assigner, Redistribute(decomposer), magic_k); + vtkmdiy::all_to_all(master, assigner, internal::Redistribute(decomposer), magic_k); } { - vtkhdiy::Master master(diy_comm, num_threads, -1, 0, [](void* b) { + vtkmdiy::Master master(diy_comm, num_threads, -1, 0, [](void* b) { ImageBlock* block = reinterpret_cast*>(b); delete block; }); - vtkhdiy::ContiguousAssigner assigner(num_blocks, num_blocks); + vtkmdiy::ContiguousAssigner assigner(num_blocks, 
num_blocks); const int dims = 2; - vtkhdiy::RegularDecomposer decomposer(dims, global_bounds, num_blocks); + vtkmdiy::RegularDecomposer decomposer(dims, global_bounds, num_blocks); AddImageBlock all_create(master, sub_image); decomposer.decompose(diy_comm.rank(), assigner, all_create); - MPI_Barrier(diy_comm); + diy_comm.barrier(); + //MPI_Barrier(diy_comm); //MPICollect(sub_image,diy_comm); - vtkhdiy::all_to_all(master, assigner, CollectImages(decomposer), magic_k); + vtkmdiy::all_to_all(master, assigner, CollectImages(decomposer), magic_k); } images.at(0).Swap(sub_image); @@ -171,3 +197,5 @@ std::string DirectSendCompositor::GetTimingString() } } +} +} //namespace vtkm::rendering::compositing diff --git a/vtkm/rendering/compositing/DirectSendCompositor.h b/vtkm/rendering/compositing/DirectSendCompositor.h index 2372ff42f..dac0cad1c 100644 --- a/vtkm/rendering/compositing/DirectSendCompositor.h +++ b/vtkm/rendering/compositing/DirectSendCompositor.h @@ -1,24 +1,44 @@ -#ifndef VTKH_DIY_DIRECT_SEND_HPP -#define VTKH_DIY_DIRECT_SEND_HPP +//============================================================================ +// Copyright (c) Kitware, Inc. +// All rights reserved. +// See LICENSE.txt for details. +// +// This software is distributed WITHOUT ANY WARRANTY; without even +// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR +// PURPOSE. See the above copyright notice for more information. 
+//============================================================================ -#include -#include -#include +#ifndef vtk_m_rendering_compositing_DirectSendCompositor_h +#define vtk_m_rendering_compositing_DirectSendCompositor_h -namespace vtkh +#include + +#include + +#include + +namespace vtkm +{ +namespace rendering +{ +namespace compositing { -class DirectSendCompositor +class VTKM_RENDERING_EXPORT DirectSendCompositor { public: DirectSendCompositor(); ~DirectSendCompositor(); - void CompositeVolume(vtkhdiy::mpi::communicator& diy_comm, std::vector& images); + void CompositeVolume(vtkmdiy::mpi::communicator& diy_comm, + std::vector& images); std::string GetTimingString(); private: std::stringstream m_timing_log; }; -} // namespace vtkh -#endif +} +} +} //namespace vtkm::rendering::compositing + +#endif //vtk_m_rendering_compositing_DirectSendCompositor_h From 93bbe46073e0166bee280b95dac0bc57c2383f3a Mon Sep 17 00:00:00 2001 From: Dave Pugmire Date: Tue, 24 Jan 2023 14:25:39 -0500 Subject: [PATCH 03/11] Add origin to Tangle, and use it for par render. --- .../compositing/PayloadCompositor.cxx | 48 +++++++++++++------ .../rendering/compositing/PayloadCompositor.h | 27 +++++++++-- vtkm/rendering/testing/CMakeLists.txt | 3 +- .../testing/UnitTestImageCompositing.cxx | 43 ++++------------- vtkm/source/Tangle.cxx | 3 +- vtkm/source/Tangle.h | 4 ++ 6 files changed, 72 insertions(+), 56 deletions(-) diff --git a/vtkm/rendering/compositing/PayloadCompositor.cxx b/vtkm/rendering/compositing/PayloadCompositor.cxx index 6b51fff9e..54e23602f 100644 --- a/vtkm/rendering/compositing/PayloadCompositor.cxx +++ b/vtkm/rendering/compositing/PayloadCompositor.cxx @@ -1,19 +1,34 @@ +//============================================================================ +// Copyright (c) Kitware, Inc. +// All rights reserved. +// See LICENSE.txt for details. 
+// +// This software is distributed WITHOUT ANY WARRANTY; without even +// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR +// PURPOSE. See the above copyright notice for more information. +//============================================================================ + +#include #include #include +#include #include #include -#ifdef VTKH_PARALLEL -#include +#ifdef VTKM_ENABLE_MPI #include -#include -#include +//#include +#include +//#include #endif -using namespace vtkm::rendering::compositing; -namespace vtkh +namespace vtkm +{ +namespace rendering +{ +namespace compositing { PayloadCompositor::PayloadCompositor() {} @@ -23,7 +38,7 @@ void PayloadCompositor::ClearImages() m_images.clear(); } -void PayloadCompositor::AddImage(PayloadImage& image) +void PayloadCompositor::AddImage(vtkm::rendering::compositing::PayloadImage& image) { assert(image.GetNumberOfPixels() != 0); @@ -36,27 +51,30 @@ void PayloadCompositor::AddImage(PayloadImage& image) // // Do local composite and keep a single image // - PayloadImageCompositor compositor; + vtkm::rendering::compositing::PayloadImageCompositor compositor; compositor.ZBufferComposite(m_images[0], image); } } -PayloadImage PayloadCompositor::Composite() +vtkm::rendering::compositing::PayloadImage PayloadCompositor::Composite() { assert(m_images.size() != 0); // nothing to do here in serial. 
Images were composited as // they were added to the compositor -#ifdef VTKH_PARALLEL - vtkhdiy::mpi::communicator diy_comm; - diy_comm = vtkhdiy::mpi::communicator(MPI_Comm_f2c(GetMPICommHandle())); +#ifdef VTKM_ENABLE_MPI + auto comm = vtkm::cont::EnvironmentTracker::GetCommunicator(); + // vtkmdiy::mpi::communicator diy_comm; + // diy_comm = vtkmdiy::mpi::communicator(MPI_Comm_f2c(GetMPICommHandle())); assert(m_images.size() == 1); - RadixKCompositor compositor; - compositor.CompositeSurface(diy_comm, this->m_images[0]); + vtkm::rendering::compositing::RadixKCompositor compositor; + compositor.CompositeSurface(comm, this->m_images[0]); #endif // Make this a param to avoid the copy? return m_images[0]; } -} // namespace vtkh +} +} +} // namespace vtkm:rendering::compositing diff --git a/vtkm/rendering/compositing/PayloadCompositor.h b/vtkm/rendering/compositing/PayloadCompositor.h index 96f7057d3..3884d20db 100644 --- a/vtkm/rendering/compositing/PayloadCompositor.h +++ b/vtkm/rendering/compositing/PayloadCompositor.h @@ -1,11 +1,25 @@ -#ifndef VTKH_PAYLOAD_COMPOSITOR_HPP -#define VTKH_PAYLOAD_COMPOSITOR_HPP +//============================================================================ +// Copyright (c) Kitware, Inc. +// All rights reserved. +// See LICENSE.txt for details. +// +// This software is distributed WITHOUT ANY WARRANTY; without even +// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR +// PURPOSE. See the above copyright notice for more information. 
+//============================================================================ + +#ifndef vtk_m_rendering_compositing_PayloadCompositor_h +#define vtk_m_rendering_compositing_PayloadCompositor_h #include #include -namespace vtkh +namespace vtkm +{ +namespace rendering +{ +namespace compositing { class VTKM_RENDERING_EXPORT PayloadCompositor @@ -23,6 +37,9 @@ protected: std::vector m_images; }; -}; +} +} +} // namespace vtkm:rendering::compositing -#endif + +#endif //vtk_m_rendering_compositing_PayloadCompositor_h diff --git a/vtkm/rendering/testing/CMakeLists.txt b/vtkm/rendering/testing/CMakeLists.txt index b607c71a3..81fa0e5d5 100644 --- a/vtkm/rendering/testing/CMakeLists.txt +++ b/vtkm/rendering/testing/CMakeLists.txt @@ -25,7 +25,7 @@ set(unit_tests UnitTestMapperGlyphVector.cxx ) -vtkm_unit_tests(SOURCES ${unit_tests}) +vtkm_unit_tests(SOURCES ${unit_tests} LIBRARIES vtkm_source) #add distributed tests i.e.test to run with MPI #if MPI is enabled. @@ -36,6 +36,7 @@ if (VTKm_ENABLE_MPI) vtkm_unit_tests( MPI DEVICE_SOURCES ${mpi_unit_tests} + LIBRARIES vtkm_source USE_VTKM_JOB_POOL ) endif() diff --git a/vtkm/rendering/testing/UnitTestImageCompositing.cxx b/vtkm/rendering/testing/UnitTestImageCompositing.cxx index f4b3fc3a0..33658a2d4 100644 --- a/vtkm/rendering/testing/UnitTestImageCompositing.cxx +++ b/vtkm/rendering/testing/UnitTestImageCompositing.cxx @@ -13,6 +13,7 @@ #include #include #include +#include #include #include @@ -120,43 +121,21 @@ void TestRenderComposite() int numBlocks = comm.size() * 1; int rank = comm.rank(); + + //Create a sequence of datasets along the X direction. 
std::string fieldName = "tangle"; - std::string fname = ""; - if (comm.rank() == 0) - fname = "/home/dpn/tangle0.vtk"; - else - fname = "/home/dpn/tangle1.vtk"; - - vtkm::io::VTKDataSetReader reader(fname); - auto ds = reader.ReadDataSet(); - ds.PrintSummary(std::cout); - - - /* vtkm::source::Tangle tangle; + vtkm::Vec3f pt(1 * rank, 0, 0); tangle.SetPointDimensions({ 50, 50, 50 }); + tangle.SetOrigin(pt); vtkm::cont::DataSet ds = tangle.Execute(); - */ - - //auto ds = CreateTestData(rank, numBlocks, 32); - //auto fieldName = "point_data_Float32"; - - /* - vtkm::rendering::testing::RenderTestOptions options; - options.Mapper = vtkm::rendering::testing::MapperType::RayTracer; - options.AllowAnyDevice = false; - options.ColorTable = vtkm::cont::ColorTable::Preset::Inferno; - vtkm::rendering::testing::RenderTest(ds, "point_data_Float32", "rendering/raytracer/regular3D.png", options); - */ vtkm::rendering::Camera camera; - camera.SetLookAt(vtkm::Vec3f_32(0.5, 0.5, 0.5)); camera.SetLookAt(vtkm::Vec3f_32(1.0, 0.5, 0.5)); camera.SetViewUp(vtkm::make_Vec(0.f, 1.f, 0.f)); camera.SetClippingRange(1.f, 10.f); camera.SetFieldOfView(60.f); - camera.SetPosition(vtkm::Vec3f_32(1.5, 1.5, 1.5)); - camera.SetPosition(vtkm::Vec3f_32(3, 3, 3)); + camera.SetPosition(vtkm::Vec3f_32(-2, 1.75, 1.75)); vtkm::cont::ColorTable colorTable("inferno"); // Background color: @@ -171,18 +150,16 @@ void TestRenderComposite() vtkm::rendering::View3D view(scene, MapperVolume(), canvas, camera, bg); view.Paint(); - if (comm.rank() == 0) - view.SaveAs("volume0.png"); - else - view.SaveAs("volume1.png"); - auto colors = &GetVTKMPointer(canvas.GetColorBuffer())[0][0]; auto depths = GetVTKMPointer(canvas.GetDepthBuffer()); vtkm::rendering::compositing::Compositor compositor; compositor.AddImage(colors, depths, width, height); auto res = compositor.Composite(); - res.Save("RESULT.png", { "" }); + if (comm.rank() == 0) + { + res.Save("RESULT.png", { "" }); + } } void RenderTests() diff --git 
a/vtkm/source/Tangle.cxx b/vtkm/source/Tangle.cxx index 79990ff90..daec4255f 100644 --- a/vtkm/source/Tangle.cxx +++ b/vtkm/source/Tangle.cxx @@ -78,13 +78,12 @@ vtkm::cont::DataSet Tangle::DoExecute() const vtkm::cont::ArrayHandle pointFieldArray; this->Invoke(tangle::TangleField{ cellDims, mins, maxs }, cellSet, pointFieldArray); - const vtkm::Vec3f origin(0.0f, 0.0f, 0.0f); const vtkm::Vec3f spacing(1.0f / static_cast(cellDims[0]), 1.0f / static_cast(cellDims[1]), 1.0f / static_cast(cellDims[2])); vtkm::cont::ArrayHandleUniformPointCoordinates coordinates( - this->PointDimensions, origin, spacing); + this->PointDimensions, this->Origin, spacing); dataSet.AddCoordinateSystem(vtkm::cont::CoordinateSystem("coordinates", coordinates)); dataSet.AddField(vtkm::cont::make_FieldPoint("tangle", pointFieldArray)); diff --git a/vtkm/source/Tangle.h b/vtkm/source/Tangle.h index d1b154ae9..69c379011 100644 --- a/vtkm/source/Tangle.h +++ b/vtkm/source/Tangle.h @@ -44,6 +44,9 @@ public: { } + VTKM_CONT vtkm::Vec3f GetOrigin() const { return this->Origin; } + VTKM_CONT void SetOrigin(vtkm::Vec3f& pt) { this->Origin = pt; } + VTKM_CONT vtkm::Id3 GetPointDimensions() const { return this->PointDimensions; } VTKM_CONT void SetPointDimensions(vtkm::Id3 dims) { this->PointDimensions = dims; } @@ -54,6 +57,7 @@ private: vtkm::cont::DataSet DoExecute() const override; vtkm::Id3 PointDimensions = { 16, 16, 16 }; + vtkm::Vec3f Origin = { 0, 0, 0 }; }; } //namespace source } //namespace vtkm From 27b0b4f25390fc9ce0e92dd17ee1786d75262a4b Mon Sep 17 00:00:00 2001 From: Dave Pugmire Date: Mon, 30 Jan 2023 07:11:51 -0500 Subject: [PATCH 04/11] Put compositor into the renderer. 
--- vtkm/rendering/Canvas.cxx | 8 ++++ vtkm/rendering/View3D.cxx | 37 +++++++++++++++ vtkm/rendering/View3D.h | 8 ++++ vtkm/rendering/compositing/Compositor.cxx | 47 +++++++++++++++++++ vtkm/rendering/compositing/Compositor.h | 5 ++ vtkm/rendering/compositing/Image.h | 8 ++-- vtkm/rendering/testing/CMakeLists.txt | 2 +- .../testing/UnitTestImageCompositing.cxx | 38 ++++++++++----- .../testing/UnitTestMapperRayTracer.cxx | 20 +++++++- vtkm/rendering/testlib/RenderTest.cxx | 5 ++ 10 files changed, 159 insertions(+), 19 deletions(-) diff --git a/vtkm/rendering/Canvas.cxx b/vtkm/rendering/Canvas.cxx index 921c6590d..6c9fcbf9b 100644 --- a/vtkm/rendering/Canvas.cxx +++ b/vtkm/rendering/Canvas.cxx @@ -12,6 +12,7 @@ #include #include +#include #include #include #include @@ -615,6 +616,13 @@ void Canvas::SetViewToScreenSpace(const vtkm::rendering::Camera& vtkmNotUsed(cam void Canvas::SaveAs(const std::string& fileName) const { + //Only rank 0 has the composited image. +#ifdef VTKM_ENABLE_MPI + auto comm = vtkm::cont::EnvironmentTracker::GetCommunicator(); + if (comm.rank() != 0) + return; +#endif + this->RefreshColorBuffer(); ColorBufferType::ReadPortalType colorPortal = GetColorBuffer().ReadPortal(); vtkm::Id width = GetWidth(); diff --git a/vtkm/rendering/View3D.cxx b/vtkm/rendering/View3D.cxx index aa5076120..940e66ba4 100644 --- a/vtkm/rendering/View3D.cxx +++ b/vtkm/rendering/View3D.cxx @@ -8,6 +8,7 @@ // PURPOSE. See the above copyright notice for more information. 
//============================================================================ +#include #include namespace vtkm @@ -41,6 +42,42 @@ void View3D::Paint() this->GetCanvas().Clear(); this->RenderAnnotations(); this->GetScene().Render(this->GetMapper(), this->GetCanvas(), this->GetCamera()); + +#ifdef VTKM_ENABLE_MPI + auto comm = vtkm::cont::EnvironmentTracker::GetCommunicator(); + if (comm.size() == 1) + return; + + this->Compositor.SetCompositeMode(vtkm::rendering::compositing::Compositor::Z_BUFFER_SURFACE); + /* + auto colors = (this->GetCanvas().GetColorBuffer().WritePortal().GetArray())[0][0]; + auto depths = (this->GetCanvas().GetDepthBuffer().WritePortal().GetArray()); + //auto colors = &GetVTKMPointer(this->GetCanvas().GetColorBuffer())[0][0]; + //auto depths = GetVTKMPointer(this->GetCanvas().GetDepthBuffer()); + */ + this->Compositor.AddImage(this->GetCanvas()); + auto result = this->Compositor.Composite(); + + //Rank 0 has the composited result, so put it into the Canvas. + if (comm.rank() == 0) + { + this->GetCanvas().Clear(); + auto colors = this->GetCanvas().GetColorBuffer(); + auto depths = this->GetCanvas().GetDepthBuffer(); + + int size = this->GetCanvas().GetWidth() * this->GetCanvas().GetHeight(); + for (int i = 0; i < size; i++) + { + const int offset = i * 4; + vtkm::Vec4f_32 rgba; + for (int j = 0; j < 4; j++) + rgba[j] = static_cast(result.m_pixels[offset + j] / 255.f); + + colors.WritePortal().Set(i, rgba); + depths.WritePortal().Set(i, result.m_depths[i]); + } + } +#endif } void View3D::RenderScreenAnnotations() diff --git a/vtkm/rendering/View3D.h b/vtkm/rendering/View3D.h index 2648f1676..e60de01e0 100644 --- a/vtkm/rendering/View3D.h +++ b/vtkm/rendering/View3D.h @@ -16,6 +16,10 @@ #include #include +#ifdef VTKM_ENABLE_MPI +#include +#endif + namespace vtkm { namespace rendering @@ -53,6 +57,10 @@ private: vtkm::rendering::AxisAnnotation3D YAxisAnnotation; vtkm::rendering::AxisAnnotation3D ZAxisAnnotation; 
vtkm::rendering::ColorBarAnnotation ColorBarAnnotation; + +#ifdef VTKM_ENABLE_MPI + vtkm::rendering::compositing::Compositor Compositor; +#endif }; } } // namespace vtkm::rendering diff --git a/vtkm/rendering/compositing/Compositor.cxx b/vtkm/rendering/compositing/Compositor.cxx index 94e365787..9dcd0ae8e 100644 --- a/vtkm/rendering/compositing/Compositor.cxx +++ b/vtkm/rendering/compositing/Compositor.cxx @@ -48,6 +48,51 @@ void Compositor::ClearImages() m_images.clear(); } +void Compositor::AddImage(vtkm::rendering::Canvas& canvas) +{ + auto colors = &(canvas.GetColorBuffer().ReadPortal().GetArray()[0][0]); + auto depths = canvas.GetDepthBuffer().ReadPortal().GetArray(); + vtkm::Id width = canvas.GetWidth(); + vtkm::Id height = canvas.GetHeight(); + + assert(m_composite_mode != VIS_ORDER_BLEND); + assert(depths != NULL); + Image image; + if (m_images.size() == 0) + { + m_images.push_back(image); + m_images[0].Init(colors, depths, width, height); + //m_images[0].Save("first.png"); + } + else if (m_composite_mode == Z_BUFFER_SURFACE) + { + // + // Do local composite and keep a single image + // + image.Init(colors, depths, width, height); + vtkm::rendering::compositing::ImageCompositor compositor; + compositor.ZBufferComposite(m_images[0], image); + } + else + { + const size_t image_index = m_images.size(); + m_images.push_back(image); + m_images[image_index].Init(colors, depths, width, height); + } +} + + +/* +void Compositor::AddImage(const vtkm::cont::ArrayHandle>& colors, + const vtkm::cont::ArrayHandle& depths, + vtkm::Id width, + vtkm::Id height) +{ + auto c = colors.WritePortal().GetArray(); + auto d = depths.WritePortal().GetArray(); + this->AddImage(c, d, width, height); +} + void Compositor::AddImage(const unsigned char* color_buffer, const float* depth_buffer, const int width, @@ -123,6 +168,7 @@ void Compositor::AddImage(const unsigned char* color_buffer, m_images[image_index].Init(color_buffer, depth_buffer, width, height, vis_order); } + void 
Compositor::AddImage(const float* color_buffer, const float* depth_buffer, const int width, @@ -136,6 +182,7 @@ void Compositor::AddImage(const float* color_buffer, m_images[image_index].Init(color_buffer, depth_buffer, width, height, vis_order); } +*/ Image Compositor::Composite() { diff --git a/vtkm/rendering/compositing/Compositor.h b/vtkm/rendering/compositing/Compositor.h index 6db06babc..29bf00f04 100644 --- a/vtkm/rendering/compositing/Compositor.h +++ b/vtkm/rendering/compositing/Compositor.h @@ -13,6 +13,7 @@ #include +#include #include #ifdef VTKM_ENABLE_MPI @@ -43,6 +44,9 @@ public: void ClearImages(); + void AddImage(vtkm::rendering::Canvas& canvas); + + /* void AddImage(const unsigned char* color_buffer, const float* depth_buffer, const int width, @@ -64,6 +68,7 @@ public: const int width, const int height, const int vis_order); +*/ Image Composite(); diff --git a/vtkm/rendering/compositing/Image.h b/vtkm/rendering/compositing/Image.h index 2ce6b8e20..f190e2623 100644 --- a/vtkm/rendering/compositing/Image.h +++ b/vtkm/rendering/compositing/Image.h @@ -85,8 +85,8 @@ struct VTKM_RENDERING_EXPORT Image void Init(const float* color_buffer, const float* depth_buffer, - int width, - int height, + vtkm::Id width, + vtkm::Id height, int composite_order = -1) { m_composite_order = composite_order; @@ -120,8 +120,8 @@ struct VTKM_RENDERING_EXPORT Image void Init(const unsigned char* color_buffer, const float* depth_buffer, - int width, - int height, + vtkm::Id width, + vtkm::Id height, int composite_order = -1) { m_composite_order = composite_order; diff --git a/vtkm/rendering/testing/CMakeLists.txt b/vtkm/rendering/testing/CMakeLists.txt index 81fa0e5d5..ddfa12315 100644 --- a/vtkm/rendering/testing/CMakeLists.txt +++ b/vtkm/rendering/testing/CMakeLists.txt @@ -25,7 +25,7 @@ set(unit_tests UnitTestMapperGlyphVector.cxx ) -vtkm_unit_tests(SOURCES ${unit_tests} LIBRARIES vtkm_source) +vtkm_unit_tests(SOURCES ${unit_tests} LIBRARIES vtkm_source 
vtkm_filter_field_transform) #add distributed tests i.e.test to run with MPI #if MPI is enabled. diff --git a/vtkm/rendering/testing/UnitTestImageCompositing.cxx b/vtkm/rendering/testing/UnitTestImageCompositing.cxx index 33658a2d4..e08c0ba7e 100644 --- a/vtkm/rendering/testing/UnitTestImageCompositing.cxx +++ b/vtkm/rendering/testing/UnitTestImageCompositing.cxx @@ -68,6 +68,7 @@ vtkm::rendering::compositing::Image ConstImage(const std::size_t& width, void TestImageComposite() { +#if 0 auto comm = vtkm::cont::EnvironmentTracker::GetCommunicator(); std::size_t width = 4, height = 4; @@ -107,6 +108,7 @@ void TestImageComposite() std::cout << res.m_depths[i] << std::endl; } } +#endif } void TestRenderComposite() @@ -120,15 +122,7 @@ void TestRenderComposite() int numBlocks = comm.size() * 1; int rank = comm.rank(); - - - //Create a sequence of datasets along the X direction. - std::string fieldName = "tangle"; - vtkm::source::Tangle tangle; - vtkm::Vec3f pt(1 * rank, 0, 0); - tangle.SetPointDimensions({ 50, 50, 50 }); - tangle.SetOrigin(pt); - vtkm::cont::DataSet ds = tangle.Execute(); + int dsPerRank = 2; vtkm::rendering::Camera camera; camera.SetLookAt(vtkm::Vec3f_32(1.0, 0.5, 0.5)); @@ -140,16 +134,33 @@ void TestRenderComposite() // Background color: vtkm::rendering::Color bg(0.2f, 0.2f, 0.2f, 1.0f); - vtkm::rendering::Actor actor( - ds.GetCellSet(), ds.GetCoordinateSystem(), ds.GetField(fieldName), colorTable); vtkm::rendering::Scene scene; - scene.AddActor(actor); int width = 512, height = 512; CanvasRayTracer canvas(width, height); - vtkm::rendering::View3D view(scene, MapperVolume(), canvas, camera, bg); + for (int i = 0; i < dsPerRank; i++) + { + //Create a sequence of datasets along the X direction. 
+ std::string fieldName = "tangle"; + vtkm::source::Tangle tangle; + vtkm::Vec3f pt(rank * dsPerRank + i, 0, 0); + if (rank == 1) + std::cout << "PT= " << pt << std::endl; + tangle.SetPointDimensions({ 50, 50, 50 }); + tangle.SetOrigin(pt); + vtkm::cont::DataSet ds = tangle.Execute(); + + vtkm::rendering::Actor actor( + ds.GetCellSet(), ds.GetCoordinateSystem(), ds.GetField(fieldName), colorTable); + scene.AddActor(actor); + } + + vtkm::rendering::View3D view(scene, MapperRayTracer(), canvas, camera, bg); view.Paint(); + canvas.SaveAs("result.png"); + + /* auto colors = &GetVTKMPointer(canvas.GetColorBuffer())[0][0]; auto depths = GetVTKMPointer(canvas.GetDepthBuffer()); @@ -160,6 +171,7 @@ void TestRenderComposite() { res.Save("RESULT.png", { "" }); } + */ } void RenderTests() diff --git a/vtkm/rendering/testing/UnitTestMapperRayTracer.cxx b/vtkm/rendering/testing/UnitTestMapperRayTracer.cxx index 25bd8271b..d4ab6fb23 100644 --- a/vtkm/rendering/testing/UnitTestMapperRayTracer.cxx +++ b/vtkm/rendering/testing/UnitTestMapperRayTracer.cxx @@ -17,6 +17,9 @@ #include #include + +#include + namespace { @@ -29,8 +32,23 @@ void RenderTests() options.AllowAnyDevice = false; options.ColorTable = vtkm::cont::ColorTable::Preset::Inferno; + + // vtkm::rendering::testing::RenderTest( + // maker.Make3DRegularDataSet0(), "pointvar", "rendering/raytracer/regular3D.png", options); + + auto ds0 = maker.Make3DRegularDataSet0(); + auto ds1 = maker.Make3DRegularDataSet0(); + + vtkm::filter::field_transform::PointTransform filter; + filter.SetTranslation({ -1, 2, 2 }); + filter.SetOutputFieldName("coordinates"); + filter.SetChangeCoordinateSystem(true); + auto res = filter.Execute(ds1); + vtkm::rendering::testing::RenderTest( - maker.Make3DRegularDataSet0(), "pointvar", "rendering/raytracer/regular3D.png", options); + { { ds0, "pointvar" }, { res, "pointvar" } }, "rendering/raytracer/regular3D.png", options); + + vtkm::rendering::testing::RenderTest(maker.Make3DRectilinearDataSet0(), 
"pointvar", "rendering/raytracer/rectilinear3D.png", diff --git a/vtkm/rendering/testlib/RenderTest.cxx b/vtkm/rendering/testlib/RenderTest.cxx index 894d9d999..1cc92b8bc 100644 --- a/vtkm/rendering/testlib/RenderTest.cxx +++ b/vtkm/rendering/testlib/RenderTest.cxx @@ -173,8 +173,11 @@ void DoRenderTest(vtkm::rendering::Canvas& canvas, vtkm::Range fieldRange; for (std::size_t dataFieldId = 0; dataFieldId < numFields; ++dataFieldId) { + std::cout << " AddActor: " << dataFieldId << std::endl; + vtkm::cont::DataSet dataSet = dataSetsFields[dataFieldId].first; std::string fieldName = dataSetsFields[dataFieldId].second; + dataSet.PrintSummary(std::cout); if (options.Colors.empty()) { scene.AddActor(vtkm::rendering::Actor(dataSet.GetCellSet(), @@ -280,6 +283,7 @@ void RenderTest(const vtkm::cont::DataSet& dataSet, const std::string& outputFile, const RenderTestOptions& options) { + std::cout << "RenderTest_1!!!" << std::endl; RenderTest({ { dataSet, fieldName } }, outputFile, options); } @@ -287,6 +291,7 @@ void RenderTest(const DataSetFieldVector& dataSetsFields, const std::string& outputFile, const RenderTestOptions& options) { + std::cout << "RenderTest_2!!!" << std::endl; std::unique_ptr deviceScope; if (options.AllowAnyDevice) { From 084f435e60674094f5c949f4b105bfcad8734c16 Mon Sep 17 00:00:00 2001 From: Dave Pugmire Date: Mon, 30 Jan 2023 09:34:55 -0500 Subject: [PATCH 05/11] Fix compile error for double precision. 
--- vtkm/rendering/testing/UnitTestImageCompositing.cxx | 2 ++ 1 file changed, 2 insertions(+) diff --git a/vtkm/rendering/testing/UnitTestImageCompositing.cxx b/vtkm/rendering/testing/UnitTestImageCompositing.cxx index e08c0ba7e..9616803b9 100644 --- a/vtkm/rendering/testing/UnitTestImageCompositing.cxx +++ b/vtkm/rendering/testing/UnitTestImageCompositing.cxx @@ -43,6 +43,7 @@ vtkm::cont::DataSet ReadDS(int rank) vtkm::io::VTKDataSetReader reader(vtkFile); } +#if 0 vtkm::rendering::compositing::Image ConstImage(const std::size_t& width, const std::size_t& height, const vtkm::Vec4f& rgba, @@ -65,6 +66,7 @@ vtkm::rendering::compositing::Image ConstImage(const std::size_t& width, return img; } +#endif void TestImageComposite() { From 727bb9f22f9ae311fa87cce2cde58f3e78e8ebb2 Mon Sep 17 00:00:00 2001 From: Dave Pugmire Date: Mon, 30 Jan 2023 15:10:11 -0500 Subject: [PATCH 06/11] Renaming vars for coding standards. --- vtkm/rendering/View3D.cxx | 4 +- vtkm/rendering/compositing/Compositor.cxx | 102 ++++---- vtkm/rendering/compositing/Compositor.h | 4 +- .../compositing/DirectSendCompositor.cxx | 8 +- vtkm/rendering/compositing/Image.cxx | 16 +- vtkm/rendering/compositing/Image.h | 228 +++++++++--------- vtkm/rendering/compositing/ImageCompositor.h | 117 +++++---- vtkm/rendering/compositing/PayloadImage.cxx | 6 +- vtkm/rendering/compositing/PayloadImage.h | 168 ++++++------- .../compositing/PayloadImageCompositor.h | 30 +-- .../compositing/RadixKCompositor.cxx | 6 +- .../compositing/vtkm_diy_image_block.h | 128 +++++----- 12 files changed, 406 insertions(+), 411 deletions(-) diff --git a/vtkm/rendering/View3D.cxx b/vtkm/rendering/View3D.cxx index 940e66ba4..bc1998cd4 100644 --- a/vtkm/rendering/View3D.cxx +++ b/vtkm/rendering/View3D.cxx @@ -71,10 +71,10 @@ void View3D::Paint() const int offset = i * 4; vtkm::Vec4f_32 rgba; for (int j = 0; j < 4; j++) - rgba[j] = static_cast(result.m_pixels[offset + j] / 255.f); + rgba[j] = static_cast(result.Pixels[offset + j] / 255.f); 
colors.WritePortal().Set(i, rgba); - depths.WritePortal().Set(i, result.m_depths[i]); + depths.WritePortal().Set(i, result.Depths[i]); } } #endif diff --git a/vtkm/rendering/compositing/Compositor.cxx b/vtkm/rendering/compositing/Compositor.cxx index 9dcd0ae8e..23e309a4e 100644 --- a/vtkm/rendering/compositing/Compositor.cxx +++ b/vtkm/rendering/compositing/Compositor.cxx @@ -30,7 +30,7 @@ namespace compositing { Compositor::Compositor() - : m_composite_mode(Z_BUFFER_SURFACE) + : CompositingMode(Z_BUFFER_SURFACE) { } @@ -39,13 +39,13 @@ Compositor::~Compositor() {} void Compositor::SetCompositeMode(CompositeMode composite_mode) { // assure we don't have mixed image types - assert(m_images.size() == 0); - m_composite_mode = composite_mode; + assert(this->Images.size() == 0); + this->CompositingMode = composite_mode; } void Compositor::ClearImages() { - m_images.clear(); + this->Images.clear(); } void Compositor::AddImage(vtkm::rendering::Canvas& canvas) @@ -55,29 +55,29 @@ void Compositor::AddImage(vtkm::rendering::Canvas& canvas) vtkm::Id width = canvas.GetWidth(); vtkm::Id height = canvas.GetHeight(); - assert(m_composite_mode != VIS_ORDER_BLEND); + assert(this->CompositingMode != VIS_ORDER_BLEND); assert(depths != NULL); Image image; - if (m_images.size() == 0) + if (this->Images.size() == 0) { - m_images.push_back(image); - m_images[0].Init(colors, depths, width, height); - //m_images[0].Save("first.png"); + this->Images.push_back(image); + this->Images[0].Init(colors, depths, width, height); + //this->Images[0].Save("first.png"); } - else if (m_composite_mode == Z_BUFFER_SURFACE) + else if (this->CompositingMode == Z_BUFFER_SURFACE) { // // Do local composite and keep a single image // image.Init(colors, depths, width, height); vtkm::rendering::compositing::ImageCompositor compositor; - compositor.ZBufferComposite(m_images[0], image); + compositor.ZBufferComposite(this->Images[0], image); } else { - const size_t image_index = m_images.size(); - 
m_images.push_back(image); - m_images[image_index].Init(colors, depths, width, height); + const size_t image_index = this->Images.size(); + this->Images.push_back(image); + this->Images[image_index].Init(colors, depths, width, height); } } @@ -98,29 +98,29 @@ void Compositor::AddImage(const unsigned char* color_buffer, const int width, const int height) { - assert(m_composite_mode != VIS_ORDER_BLEND); + assert(this->CompositingMode != VIS_ORDER_BLEND); assert(depth_buffer != NULL); Image image; - if (m_images.size() == 0) + if (this->Images.size() == 0) { - m_images.push_back(image); - m_images[0].Init(color_buffer, depth_buffer, width, height); - //m_images[0].Save("first.png"); + this->Images.push_back(image); + this->Images[0].Init(color_buffer, depth_buffer, width, height); + //this->Images[0].Save("first.png"); } - else if (m_composite_mode == Z_BUFFER_SURFACE) + else if (this->CompositingMode == Z_BUFFER_SURFACE) { // // Do local composite and keep a single image // image.Init(color_buffer, depth_buffer, width, height); vtkm::rendering::compositing::ImageCompositor compositor; - compositor.ZBufferComposite(m_images[0], image); + compositor.ZBufferComposite(this->Images[0], image); } else { - const size_t image_index = m_images.size(); - m_images.push_back(image); - m_images[image_index].Init(color_buffer, depth_buffer, width, height); + const size_t image_index = this->Images.size(); + this->Images.push_back(image); + this->Images[image_index].Init(color_buffer, depth_buffer, width, height); } } @@ -129,15 +129,15 @@ void Compositor::AddImage(const float* color_buffer, const int width, const int height) { - assert(m_composite_mode != VIS_ORDER_BLEND); + assert(this->CompositingMode != VIS_ORDER_BLEND); assert(depth_buffer != NULL); Image image; - if (m_images.size() == 0) + if (this->Images.size() == 0) { - m_images.push_back(image); - m_images[0].Init(color_buffer, depth_buffer, width, height); + this->Images.push_back(image); + 
this->Images[0].Init(color_buffer, depth_buffer, width, height); } - else if (m_composite_mode == Z_BUFFER_SURFACE) + else if (this->CompositingMode == Z_BUFFER_SURFACE) { // // Do local composite and keep a single image @@ -145,13 +145,13 @@ void Compositor::AddImage(const float* color_buffer, image.Init(color_buffer, depth_buffer, width, height); vtkm::rendering::compositing::ImageCompositor compositor; - compositor.ZBufferComposite(m_images[0], image); + compositor.ZBufferComposite(this->Images[0], image); } else { - const size_t image_index = m_images.size(); - m_images.push_back(image); - m_images[image_index].Init(color_buffer, depth_buffer, width, height); + const size_t image_index = this->Images.size(); + this->Images.push_back(image); + this->Images[image_index].Init(color_buffer, depth_buffer, width, height); } } @@ -161,11 +161,11 @@ void Compositor::AddImage(const unsigned char* color_buffer, const int height, const int vis_order) { - assert(m_composite_mode == VIS_ORDER_BLEND); + assert(this->CompositingMode == VIS_ORDER_BLEND); Image image; - const size_t image_index = m_images.size(); - m_images.push_back(image); - m_images[image_index].Init(color_buffer, depth_buffer, width, height, vis_order); + const size_t image_index = this->Images.size(); + this->Images.push_back(image); + this->Images[image_index].Init(color_buffer, depth_buffer, width, height, vis_order); } @@ -175,33 +175,33 @@ void Compositor::AddImage(const float* color_buffer, const int height, const int vis_order) { - assert(m_composite_mode == VIS_ORDER_BLEND); + assert(this->CompositingMode == VIS_ORDER_BLEND); Image image; - const size_t image_index = m_images.size(); - m_images.push_back(image); + const size_t image_index = this->Images.size(); + this->Images.push_back(image); - m_images[image_index].Init(color_buffer, depth_buffer, width, height, vis_order); + this->Images[image_index].Init(color_buffer, depth_buffer, width, height, vis_order); } */ Image Compositor::Composite() { 
- assert(m_images.size() != 0); + assert(this->Images.size() != 0); - if (m_composite_mode == Z_BUFFER_SURFACE) + if (this->CompositingMode == Z_BUFFER_SURFACE) { CompositeZBufferSurface(); } - else if (m_composite_mode == Z_BUFFER_BLEND) + else if (this->CompositingMode == Z_BUFFER_BLEND) { CompositeZBufferBlend(); } - else if (m_composite_mode == VIS_ORDER_BLEND) + else if (this->CompositingMode == VIS_ORDER_BLEND) { CompositeVisOrder(); } // Make this a param to avoid the copy? - return m_images[0]; + return this->Images[0]; } void Compositor::Cleanup() {} @@ -220,9 +220,9 @@ void Compositor::CompositeZBufferSurface() #ifdef VTKM_ENABLE_MPI auto comm = vtkm::cont::EnvironmentTracker::GetCommunicator(); - assert(m_images.size() == 1); + assert(this->Images.size() == 1); RadixKCompositor compositor; - compositor.CompositeSurface(comm, this->m_images[0]); + compositor.CompositeSurface(comm, this->Images[0]); m_log_stream << compositor.GetTimingString(); #endif } @@ -237,12 +237,12 @@ void Compositor::CompositeVisOrder() #ifdef VTKM_ENABLE_MPI auto comm = vtkm::cont::EnvironmentTracker::GetCommunicator(); - assert(m_images.size() != 0); + assert(this->Images.size() != 0); vtkm::rendering::compositing::DirectSendCompositor compositor; - compositor.CompositeVolume(comm, this->m_images); + compositor.CompositeVolume(comm, this->Images); #else vtkm::rendering::compositing::ImageCompositor compositor; - compositor.OrderedComposite(m_images); + compositor.OrderedComposite(this->Images); #endif } diff --git a/vtkm/rendering/compositing/Compositor.h b/vtkm/rendering/compositing/Compositor.h index 29bf00f04..8ce85e853 100644 --- a/vtkm/rendering/compositing/Compositor.h +++ b/vtkm/rendering/compositing/Compositor.h @@ -97,8 +97,8 @@ protected: virtual void CompositeVisOrder(); std::stringstream m_log_stream; - CompositeMode m_composite_mode; - std::vector m_images; + CompositeMode CompositingMode; + std::vector Images; }; } diff --git 
a/vtkm/rendering/compositing/DirectSendCompositor.cxx b/vtkm/rendering/compositing/DirectSendCompositor.cxx index f6638d41a..344f92891 100644 --- a/vtkm/rendering/compositing/DirectSendCompositor.cxx +++ b/vtkm/rendering/compositing/DirectSendCompositor.cxx @@ -80,7 +80,7 @@ struct Redistribute proxy.enqueue(it->first, it->second); } } // if - else if (block->m_images.at(0).m_composite_order != -1) + else if (block->m_images.at(0).CompositeOrder != -1) { // blend images according to vis order std::vector images; @@ -103,8 +103,8 @@ struct Redistribute block->m_output.Swap(images[0]); } // else if - else if (block->m_images.at(0).m_composite_order == -1 && - block->m_images.at(0).HasTransparency()) + else if (block->m_images.at(0).CompositeOrder == -1 && + block->m_images.at(0).GetHasTransparency()) { std::vector images; for (int i = 0; i < proxy.in_link().size(); ++i) @@ -140,7 +140,7 @@ DirectSendCompositor::~DirectSendCompositor() {} void DirectSendCompositor::CompositeVolume(vtkmdiy::mpi::communicator& diy_comm, std::vector& images) { - vtkmdiy::DiscreteBounds global_bounds = vtkh::VTKMBoundsToDIY(images.at(0).m_orig_bounds); + vtkmdiy::DiscreteBounds global_bounds = vtkh::VTKMBoundsToDIY(images.at(0).OrigBounds); const int num_threads = 1; const int num_blocks = diy_comm.size(); diff --git a/vtkm/rendering/compositing/Image.cxx b/vtkm/rendering/compositing/Image.cxx index 8e1c838b5..02fa20f93 100644 --- a/vtkm/rendering/compositing/Image.cxx +++ b/vtkm/rendering/compositing/Image.cxx @@ -12,20 +12,20 @@ namespace compositing void Image::Save(const std::string& name, const std::vector& comments) { - PNGEncoder encoder; - encoder.Encode(&m_pixels[0], - m_bounds.X.Max - m_bounds.X.Min + 1, - m_bounds.Y.Max - m_bounds.Y.Min + 1, + vtkm::rendering::compositing::PNGEncoder encoder; + encoder.Encode(&this->Pixels[0], + this->Bounds.X.Max - this->Bounds.X.Min + 1, + this->Bounds.Y.Max - this->Bounds.Y.Min + 1, comments); encoder.Save(name); } void Image::Save(const 
std::string& name, const std::vector& comments) const { - PNGEncoder encoder; - encoder.Encode(&m_pixels[0], - m_bounds.X.Max - m_bounds.X.Min + 1, - m_bounds.Y.Max - m_bounds.Y.Min + 1, + vtkm::rendering::compositing::PNGEncoder encoder; + encoder.Encode(&this->Pixels[0], + this->Bounds.X.Max - this->Bounds.X.Min + 1, + this->Bounds.Y.Max - this->Bounds.Y.Min + 1, comments); encoder.Save(name); } diff --git a/vtkm/rendering/compositing/Image.h b/vtkm/rendering/compositing/Image.h index f190e2623..523069b0f 100644 --- a/vtkm/rendering/compositing/Image.h +++ b/vtkm/rendering/compositing/Image.h @@ -28,60 +28,60 @@ struct VTKM_RENDERING_EXPORT Image { // The image bounds are indicated by a grid starting at // 1-width and 1-height. Actual width would be calculated - // m_bounds.X.Max - m_bounds.X.Min + 1 + // Bounds.X.Max - Bounds.X.Min + 1 // 1024 - 1 + 1 = 1024 - vtkm::Bounds m_orig_bounds; - vtkm::Bounds m_bounds; - std::vector m_pixels; - std::vector m_depths; - int m_orig_rank; - bool m_has_transparency; - int m_composite_order; + vtkm::Bounds OrigBounds; + vtkm::Bounds Bounds; + std::vector Pixels; + std::vector Depths; + int OrigRank; + bool HasTransparency; + int CompositeOrder; Image() - : m_orig_rank(-1) - , m_has_transparency(false) - , m_composite_order(-1) + : OrigRank(-1) + , HasTransparency(false) + , CompositeOrder(-1) { } Image(const vtkm::Bounds& bounds) - : m_orig_bounds(bounds) - , m_bounds(bounds) - , m_orig_rank(-1) - , m_has_transparency(false) - , m_composite_order(-1) + : OrigBounds(bounds) + , Bounds(bounds) + , OrigRank(-1) + , HasTransparency(false) + , CompositeOrder(-1) { const int dx = bounds.X.Max - bounds.X.Min + 1; const int dy = bounds.Y.Max - bounds.Y.Min + 1; - m_pixels.resize(dx * dy * 4); - m_depths.resize(dx * dy); + this->Pixels.resize(dx * dy * 4); + this->Depths.resize(dx * dy); } // init this image based on the original bounds // of the other image void InitOriginal(const Image& other) { - m_orig_bounds = 
other.m_orig_bounds; - m_bounds = other.m_orig_bounds; + this->OrigBounds = other.OrigBounds; + this->Bounds = other.OrigBounds; - const int dx = m_bounds.X.Max - m_bounds.X.Min + 1; - const int dy = m_bounds.Y.Max - m_bounds.Y.Min + 1; - m_pixels.resize(dx * dy * 4); - m_depths.resize(dx * dy); + const int dx = this->Bounds.X.Max - this->Bounds.X.Min + 1; + const int dy = this->Bounds.Y.Max - this->Bounds.Y.Min + 1; + this->Pixels.resize(dx * dy * 4); + this->Depths.resize(dx * dy); - m_orig_rank = -1; - m_has_transparency = false; - m_composite_order = -1; + this->OrigRank = -1; + this->HasTransparency = false; + this->CompositeOrder = -1; } - int GetNumberOfPixels() const { return static_cast(m_pixels.size() / 4); } + int GetNumberOfPixels() const { return static_cast(this->Pixels.size() / 4); } - void SetHasTransparency(bool has_transparency) { m_has_transparency = has_transparency; } + void SetHasTransparency(bool has_transparency) { this->HasTransparency = has_transparency; } - bool HasTransparency() { return m_has_transparency; } + bool GetHasTransparency() { return this->HasTransparency; } void Init(const float* color_buffer, const float* depth_buffer, @@ -89,15 +89,15 @@ struct VTKM_RENDERING_EXPORT Image vtkm::Id height, int composite_order = -1) { - m_composite_order = composite_order; - m_bounds.X.Min = 1; - m_bounds.Y.Min = 1; - m_bounds.X.Max = width; - m_bounds.Y.Max = height; - m_orig_bounds = m_bounds; + this->CompositeOrder = composite_order; + this->Bounds.X.Min = 1; + this->Bounds.Y.Min = 1; + this->Bounds.X.Max = width; + this->Bounds.Y.Max = height; + this->OrigBounds = this->Bounds; const int size = width * height; - m_pixels.resize(size * 4); - m_depths.resize(size); + this->Pixels.resize(size * 4); + this->Depths.resize(size); #ifdef VTKH_OPENMP_ENABLED #pragma omp parallel for @@ -105,16 +105,16 @@ struct VTKM_RENDERING_EXPORT Image for (int i = 0; i < size; ++i) { const int offset = i * 4; - m_pixels[offset + 0] = 
static_cast(color_buffer[offset + 0] * 255.f); - m_pixels[offset + 1] = static_cast(color_buffer[offset + 1] * 255.f); - m_pixels[offset + 2] = static_cast(color_buffer[offset + 2] * 255.f); - m_pixels[offset + 3] = static_cast(color_buffer[offset + 3] * 255.f); + this->Pixels[offset + 0] = static_cast(color_buffer[offset + 0] * 255.f); + this->Pixels[offset + 1] = static_cast(color_buffer[offset + 1] * 255.f); + this->Pixels[offset + 2] = static_cast(color_buffer[offset + 2] * 255.f); + this->Pixels[offset + 3] = static_cast(color_buffer[offset + 3] * 255.f); float depth = depth_buffer[i]; //make sure we can do a single comparison on depth //deal with negative depth values //TODO: This may not be the best way depth = depth < 0 ? abs(depth) : depth; - m_depths[i] = depth; + this->Depths[i] = depth; } } @@ -124,18 +124,18 @@ struct VTKM_RENDERING_EXPORT Image vtkm::Id height, int composite_order = -1) { - m_composite_order = composite_order; - m_bounds.X.Min = 1; - m_bounds.Y.Min = 1; - m_bounds.X.Max = width; - m_bounds.Y.Max = height; - m_orig_bounds = m_bounds; + this->CompositeOrder = composite_order; + this->Bounds.X.Min = 1; + this->Bounds.Y.Min = 1; + this->Bounds.X.Max = width; + this->Bounds.Y.Max = height; + this->OrigBounds = this->Bounds; const int size = width * height; - m_pixels.resize(size * 4); - m_depths.resize(size); + this->Pixels.resize(size * 4); + this->Depths.resize(size); - std::copy(color_buffer, color_buffer + size * 4, &m_pixels[0]); + std::copy(color_buffer, color_buffer + size * 4, &this->Pixels[0]); #ifdef VTKH_OPENMP_ENABLED #pragma omp parallel for @@ -145,7 +145,7 @@ struct VTKM_RENDERING_EXPORT Image float depth = depth_buffer[i]; //make sure we can do a single comparison on depth depth = depth < 0 ? 
2.f : depth; - m_depths[i] = depth; + this->Depths[i] = depth; } // for } @@ -153,7 +153,7 @@ struct VTKM_RENDERING_EXPORT Image void CompositeBackground(const float* color) { - const int size = static_cast(m_pixels.size() / 4); + const int size = static_cast(this->Pixels.size() / 4); unsigned char bg_color[4]; for (int i = 0; i < 4; ++i) { @@ -166,12 +166,12 @@ struct VTKM_RENDERING_EXPORT Image for (int i = 0; i < size; ++i) { const int offset = i * 4; - unsigned int alpha = static_cast(m_pixels[offset + 3]); + unsigned int alpha = static_cast(this->Pixels[offset + 3]); const float opacity = (255 - alpha); - m_pixels[offset + 0] += static_cast(opacity * bg_color[0] / 255); - m_pixels[offset + 1] += static_cast(opacity * bg_color[1] / 255); - m_pixels[offset + 2] += static_cast(opacity * bg_color[2] / 255); - m_pixels[offset + 3] += static_cast(opacity * bg_color[3] / 255); + this->Pixels[offset + 0] += static_cast(opacity * bg_color[0] / 255); + this->Pixels[offset + 1] += static_cast(opacity * bg_color[1] / 255); + this->Pixels[offset + 2] += static_cast(opacity * bg_color[2] / 255); + this->Pixels[offset + 3] += static_cast(opacity * bg_color[3] / 255); } } // @@ -179,28 +179,28 @@ struct VTKM_RENDERING_EXPORT Image // void SubsetFrom(const Image& image, const vtkm::Bounds& sub_region) { - m_orig_bounds = image.m_orig_bounds; - m_bounds = sub_region; - m_orig_rank = image.m_orig_rank; - m_composite_order = image.m_composite_order; + this->OrigBounds = image.OrigBounds; + this->Bounds = sub_region; + this->OrigRank = image.OrigRank; + this->CompositeOrder = image.CompositeOrder; - assert(sub_region.X.Min >= image.m_bounds.X.Min); - assert(sub_region.Y.Min >= image.m_bounds.Y.Min); - assert(sub_region.X.Max <= image.m_bounds.X.Max); - assert(sub_region.Y.Max <= image.m_bounds.Y.Max); + assert(sub_region.X.Min >= image.Bounds.X.Min); + assert(sub_region.Y.Min >= image.Bounds.Y.Min); + assert(sub_region.X.Max <= image.Bounds.X.Max); + assert(sub_region.Y.Max <= 
image.Bounds.Y.Max); - const int s_dx = m_bounds.X.Max - m_bounds.X.Min + 1; - const int s_dy = m_bounds.Y.Max - m_bounds.Y.Min + 1; + const int s_dx = this->Bounds.X.Max - this->Bounds.X.Min + 1; + const int s_dy = this->Bounds.Y.Max - this->Bounds.Y.Min + 1; - const int dx = image.m_bounds.X.Max - image.m_bounds.X.Min + 1; - //const int dy = image.m_bounds.Y.Max - image.m_bounds.Y.Min + 1; + const int dx = image.Bounds.X.Max - image.Bounds.X.Min + 1; + //const int dy = image.Bounds.Y.Max - image.Bounds.Y.Min + 1; - const int start_x = m_bounds.X.Min - image.m_bounds.X.Min; - const int start_y = m_bounds.Y.Min - image.m_bounds.Y.Min; + const int start_x = this->Bounds.X.Min - image.Bounds.X.Min; + const int start_y = this->Bounds.Y.Min - image.Bounds.Y.Min; const int end_y = start_y + s_dy; - m_pixels.resize(s_dx * s_dy * 4); - m_depths.resize(s_dx * s_dy); + this->Pixels.resize(s_dx * s_dy * 4); + this->Depths.resize(s_dx * s_dy); @@ -212,10 +212,10 @@ struct VTKM_RENDERING_EXPORT Image const int copy_to = (y - start_y) * s_dx; const int copy_from = y * dx + start_x; - std::copy(&image.m_pixels[copy_from * 4], - &image.m_pixels[copy_from * 4] + s_dx * 4, - &m_pixels[copy_to * 4]); - std::copy(&image.m_depths[copy_from], &image.m_depths[copy_from] + s_dx, &m_depths[copy_to]); + std::copy(&image.Pixels[copy_from * 4], + &image.Pixels[copy_from * 4] + s_dx * 4, + &this->Pixels[copy_to * 4]); + std::copy(&image.Depths[copy_from], &image.Depths[copy_from] + s_dx, &this->Depths[copy_to]); } } @@ -230,17 +230,17 @@ struct VTKM_RENDERING_EXPORT Image int index = color % 3; c[index] = 255 - color * 11; ; - const int size = static_cast(m_pixels.size()); + const int size = static_cast(this->Pixels.size()); for (int i = 0; i < size; ++i) { - float d = m_depths[i / 4]; + float d = this->Depths[i / 4]; if (d > 0 && d < 1) { - m_pixels[i] = c[i % 4]; + this->Pixels[i] = c[i % 4]; } else { - m_pixels[i] = 155; + this->Pixels[i] = 155; } } } @@ -249,20 +249,20 @@ struct 
VTKM_RENDERING_EXPORT Image // void SubsetTo(Image& image) const { - image.m_composite_order = m_composite_order; - assert(m_bounds.X.Min >= image.m_bounds.X.Min); - assert(m_bounds.Y.Min >= image.m_bounds.Y.Min); - assert(m_bounds.X.Max <= image.m_bounds.X.Max); - assert(m_bounds.Y.Max <= image.m_bounds.Y.Max); + image.CompositeOrder = this->CompositeOrder; + assert(this->Bounds.X.Min >= image.Bounds.X.Min); + assert(this->Bounds.Y.Min >= image.Bounds.Y.Min); + assert(this->Bounds.X.Max <= image.Bounds.X.Max); + assert(this->Bounds.Y.Max <= image.Bounds.Y.Max); - const int s_dx = m_bounds.X.Max - m_bounds.X.Min + 1; - const int s_dy = m_bounds.Y.Max - m_bounds.Y.Min + 1; + const int s_dx = this->Bounds.X.Max - this->Bounds.X.Min + 1; + const int s_dy = this->Bounds.Y.Max - this->Bounds.Y.Min + 1; - const int dx = image.m_bounds.X.Max - image.m_bounds.X.Min + 1; - //const int dy = image.m_bounds.Y.Max - image.m_bounds.Y.Min + 1; + const int dx = image.Bounds.X.Max - image.Bounds.X.Min + 1; + //const int dy = image.Bounds.Y.Max - image.Bounds.Y.Min + 1; - const int start_x = m_bounds.X.Min - image.m_bounds.X.Min; - const int start_y = m_bounds.Y.Min - image.m_bounds.Y.Min; + const int start_x = this->Bounds.X.Min - image.Bounds.X.Min; + const int start_y = this->Bounds.Y.Min - image.Bounds.Y.Min; #ifdef VTKH_OPENMP_ENABLED #pragma omp parallel for @@ -272,44 +272,44 @@ struct VTKM_RENDERING_EXPORT Image const int copy_to = (y + start_y) * dx + start_x; const int copy_from = y * s_dx; - std::copy(&m_pixels[copy_from * 4], - &m_pixels[copy_from * 4] + s_dx * 4, - &image.m_pixels[copy_to * 4]); + std::copy(&this->Pixels[copy_from * 4], + &this->Pixels[copy_from * 4] + s_dx * 4, + &image.Pixels[copy_to * 4]); - std::copy(&m_depths[copy_from], &m_depths[copy_from] + s_dx, &image.m_depths[copy_to]); + std::copy(&this->Depths[copy_from], &this->Depths[copy_from] + s_dx, &image.Depths[copy_to]); } } void Swap(Image& other) { - vtkm::Bounds orig = m_orig_bounds; - 
vtkm::Bounds bounds = m_bounds; + vtkm::Bounds orig = this->OrigBounds; + vtkm::Bounds bounds = this->Bounds; - m_orig_bounds = other.m_orig_bounds; - m_bounds = other.m_bounds; + this->OrigBounds = other.OrigBounds; + this->Bounds = other.Bounds; - other.m_orig_bounds = orig; - other.m_bounds = bounds; + other.OrigBounds = orig; + other.Bounds = bounds; - m_pixels.swap(other.m_pixels); - m_depths.swap(other.m_depths); + this->Pixels.swap(other.Pixels); + this->Depths.swap(other.Depths); } void Clear() { vtkm::Bounds empty; - m_orig_bounds = empty; - m_bounds = empty; - m_pixels.clear(); - m_depths.clear(); + this->OrigBounds = empty; + this->Bounds = empty; + this->Pixels.clear(); + this->Depths.clear(); } std::string ToString() const { std::stringstream ss; - ss << "Total size pixels " << (int)m_pixels.size() / 4; - ss << " tile dims: {" << m_bounds.X.Min << "," << m_bounds.Y.Min << "} - "; - ss << "{" << m_bounds.X.Max << "," << m_bounds.Y.Max << "}\n"; + ss << "Total size pixels " << (int)this->Pixels.size() / 4; + ss << " tile dims: {" << this->Bounds.X.Min << "," << this->Bounds.Y.Min << "} - "; + ss << "{" << this->Bounds.X.Max << "," << this->Bounds.Y.Max << "}\n"; ; return ss.str(); } @@ -322,7 +322,7 @@ struct CompositeOrderSort { inline bool operator()(const Image& lhs, const Image& rhs) const { - return lhs.m_composite_order < rhs.m_composite_order; + return lhs.CompositeOrder < rhs.CompositeOrder; } }; diff --git a/vtkm/rendering/compositing/ImageCompositor.h b/vtkm/rendering/compositing/ImageCompositor.h index 40fdf176c..ad8aff7dc 100644 --- a/vtkm/rendering/compositing/ImageCompositor.h +++ b/vtkm/rendering/compositing/ImageCompositor.h @@ -28,12 +28,11 @@ class VTKM_RENDERING_EXPORT ImageCompositor public: void Blend(vtkm::rendering::compositing::Image& front, vtkm::rendering::compositing::Image& back) { - - assert(front.m_bounds.X.Min == back.m_bounds.X.Min); - assert(front.m_bounds.Y.Min == back.m_bounds.Y.Min); - assert(front.m_bounds.X.Max == 
back.m_bounds.X.Max); - assert(front.m_bounds.Y.Max == back.m_bounds.Y.Max); - const int size = static_cast(front.m_pixels.size() / 4); + assert(front.Bounds.X.Min == back.Bounds.X.Min); + assert(front.Bounds.Y.Min == back.Bounds.Y.Min); + assert(front.Bounds.X.Max == back.Bounds.X.Max); + assert(front.Bounds.Y.Max == back.Bounds.Y.Max); + const int size = static_cast(front.Pixels.size() / 4); #ifdef VTKH_OPENMP_ENABLED #pragma omp parallel for @@ -41,52 +40,52 @@ public: for (int i = 0; i < size; ++i) { const int offset = i * 4; - unsigned int alpha = front.m_pixels[offset + 3]; + unsigned int alpha = front.Pixels[offset + 3]; const unsigned int opacity = 255 - alpha; - front.m_pixels[offset + 0] += - static_cast(opacity * back.m_pixels[offset + 0] / 255); - front.m_pixels[offset + 1] += - static_cast(opacity * back.m_pixels[offset + 1] / 255); - front.m_pixels[offset + 2] += - static_cast(opacity * back.m_pixels[offset + 2] / 255); - front.m_pixels[offset + 3] += - static_cast(opacity * back.m_pixels[offset + 3] / 255); + front.Pixels[offset + 0] += + static_cast(opacity * back.Pixels[offset + 0] / 255); + front.Pixels[offset + 1] += + static_cast(opacity * back.Pixels[offset + 1] / 255); + front.Pixels[offset + 2] += + static_cast(opacity * back.Pixels[offset + 2] / 255); + front.Pixels[offset + 3] += + static_cast(opacity * back.Pixels[offset + 3] / 255); - float d1 = std::min(front.m_depths[i], 1.001f); - float d2 = std::min(back.m_depths[i], 1.001f); + float d1 = std::min(front.Depths[i], 1.001f); + float d2 = std::min(back.Depths[i], 1.001f); float depth = std::min(d1, d2); - front.m_depths[i] = depth; + front.Depths[i] = depth; } } void ZBufferComposite(vtkm::rendering::compositing::Image& front, const vtkm::rendering::compositing::Image& image) { - assert(front.m_depths.size() == front.m_pixels.size() / 4); - assert(front.m_bounds.X.Min == image.m_bounds.X.Min); - assert(front.m_bounds.Y.Min == image.m_bounds.Y.Min); - assert(front.m_bounds.X.Max == 
image.m_bounds.X.Max); - assert(front.m_bounds.Y.Max == image.m_bounds.Y.Max); + assert(front.Depths.size() == front.Pixels.size() / 4); + assert(front.Bounds.X.Min == image.Bounds.X.Min); + assert(front.Bounds.Y.Min == image.Bounds.Y.Min); + assert(front.Bounds.X.Max == image.Bounds.X.Max); + assert(front.Bounds.Y.Max == image.Bounds.Y.Max); - const int size = static_cast(front.m_depths.size()); + const int size = static_cast(front.Depths.size()); #ifdef VTKH_OPENMP_ENABLED #pragma omp parallel for #endif for (int i = 0; i < size; ++i) { - const float depth = image.m_depths[i]; - if (depth > 1.f || front.m_depths[i] < depth) + const float depth = image.Depths[i]; + if (depth > 1.f || front.Depths[i] < depth) { continue; } const int offset = i * 4; - front.m_depths[i] = abs(depth); - front.m_pixels[offset + 0] = image.m_pixels[offset + 0]; - front.m_pixels[offset + 1] = image.m_pixels[offset + 1]; - front.m_pixels[offset + 2] = image.m_pixels[offset + 2]; - front.m_pixels[offset + 3] = image.m_pixels[offset + 3]; + front.Depths[i] = abs(depth); + front.Pixels[offset + 0] = image.Pixels[offset + 0]; + front.Pixels[offset + 1] = image.Pixels[offset + 1]; + front.Pixels[offset + 2] = image.Pixels[offset + 2]; + front.Pixels[offset + 3] = image.Pixels[offset + 3]; } } @@ -111,19 +110,19 @@ public: struct Pixel { - unsigned char m_color[4]; - float m_depth; - int m_pixel_id; // local (sub-image) pixels id + unsigned char Color[4]; + float Depth; + int PixelId; // local (sub-image) pixels id bool operator<(const Pixel& other) const { - if (m_pixel_id != other.m_pixel_id) + if (this->PixelId != other.PixelId) { - return m_pixel_id < other.m_pixel_id; + return this->PixelId < other.PixelId; } else { - return m_depth < other.m_depth; + return this->Depth < other.Depth; } } }; @@ -147,12 +146,12 @@ public: for (int j = 0; j < image_size; ++j) { const int image_offset = j * 4; - pixels[offset + j].m_color[0] = images[i].m_pixels[image_offset + 0]; - pixels[offset + 
j].m_color[1] = images[i].m_pixels[image_offset + 1]; - pixels[offset + j].m_color[2] = images[i].m_pixels[image_offset + 2]; - pixels[offset + j].m_color[3] = images[i].m_pixels[image_offset + 3]; - pixels[offset + j].m_depth = images[i].m_depths[j]; - pixels[offset + j].m_pixel_id = j; + pixels[offset + j].Color[0] = images[i].Pixels[image_offset + 0]; + pixels[offset + j].Color[1] = images[i].Pixels[image_offset + 1]; + pixels[offset + j].Color[2] = images[i].Pixels[image_offset + 2]; + pixels[offset + j].Color[3] = images[i].Pixels[image_offset + 3]; + pixels[offset + j].Depth = images[i].Depths[j]; + pixels[offset + j].PixelId = j; } // for pixels } // for images } @@ -174,10 +173,10 @@ public: } // check to see if that worked - int pixel_id_0 = pixels[0].m_pixel_id; + int pixel_id_0 = pixels[0].PixelId; for (int i = 1; i < num_images; ++i) { - assert(pixel_id_0 == pixels[i].m_pixel_id); + assert(pixel_id_0 == pixels[i].PixelId); } @@ -190,27 +189,23 @@ public: Pixel pixel = pixels[index]; for (int j = 1; j < num_images; ++j) { - if (pixel.m_color[3] == 255 || pixel.m_depth > 1.f) + if (pixel.Color[3] == 255 || pixel.Depth > 1.f) { break; } - unsigned int alpha = pixel.m_color[3]; + unsigned int alpha = pixel.Color[3]; const unsigned int opacity = 255 - alpha; - pixel.m_color[0] += - static_cast(opacity * pixels[index + j].m_color[0] / 255); - pixel.m_color[1] += - static_cast(opacity * pixels[index + j].m_color[1] / 255); - pixel.m_color[2] += - static_cast(opacity * pixels[index + j].m_color[2] / 255); - pixel.m_color[3] += - static_cast(opacity * pixels[index + j].m_color[3] / 255); - pixel.m_depth = pixels[index + j].m_depth; + pixel.Color[0] += static_cast(opacity * pixels[index + j].Color[0] / 255); + pixel.Color[1] += static_cast(opacity * pixels[index + j].Color[1] / 255); + pixel.Color[2] += static_cast(opacity * pixels[index + j].Color[2] / 255); + pixel.Color[3] += static_cast(opacity * pixels[index + j].Color[3] / 255); + pixel.Depth = pixels[index 
+ j].Depth; } // for each image - images[0].m_pixels[i * 4 + 0] = pixel.m_color[0]; - images[0].m_pixels[i * 4 + 1] = pixel.m_color[1]; - images[0].m_pixels[i * 4 + 2] = pixel.m_color[2]; - images[0].m_pixels[i * 4 + 3] = pixel.m_color[3]; - images[0].m_depths[i] = pixel.m_depth; + images[0].Pixels[i * 4 + 0] = pixel.Color[0]; + images[0].Pixels[i * 4 + 1] = pixel.Color[1]; + images[0].Pixels[i * 4 + 2] = pixel.Color[2]; + images[0].Pixels[i * 4 + 3] = pixel.Color[3]; + images[0].Depths[i] = pixel.Depth; } // for each pixel } }; diff --git a/vtkm/rendering/compositing/PayloadImage.cxx b/vtkm/rendering/compositing/PayloadImage.cxx index 10ff0f89d..9fb99f715 100644 --- a/vtkm/rendering/compositing/PayloadImage.cxx +++ b/vtkm/rendering/compositing/PayloadImage.cxx @@ -21,9 +21,9 @@ namespace compositing void PayloadImage::Save(const std::string& name, const std::vector& comments) { PNGEncoder encoder; - encoder.Encode(&m_payloads[0], - m_bounds.X.Max - m_bounds.X.Min + 1, - m_bounds.Y.Max - m_bounds.Y.Min + 1, + encoder.Encode(&this->Payloads[0], + this->Bounds.X.Max - this->Bounds.X.Min + 1, + this->Bounds.Y.Max - this->Bounds.Y.Min + 1, comments); encoder.Save(name); } diff --git a/vtkm/rendering/compositing/PayloadImage.h b/vtkm/rendering/compositing/PayloadImage.h index 501e82715..5bbbd87a6 100644 --- a/vtkm/rendering/compositing/PayloadImage.h +++ b/vtkm/rendering/compositing/PayloadImage.h @@ -28,62 +28,62 @@ struct VTKM_RENDERING_EXPORT PayloadImage { // The image bounds are indicated by a grid starting at // 1-width and 1-height. 
Actual width would be calculated - // m_bounds.X.Max - m_bounds.X.Min + 1 + // Bounds.X.Max - Bounds.X.Min + 1 // 1024 - 1 + 1 = 1024 - vtkm::Bounds m_orig_bounds; - vtkm::Bounds m_bounds; - std::vector m_payloads; - std::vector m_depths; - int m_orig_rank; - int m_payload_bytes; // Size of the payload in bytes - float m_default_value; + vtkm::Bounds OrigBounds; + vtkm::Bounds Bounds; + std::vector Payloads; + std::vector Depths; + int OrigRank; + int PayloadBytes; // Size of the payload in bytes + float DefaultValue; PayloadImage() {} PayloadImage(const vtkm::Bounds& bounds, const int payload_bytes) - : m_orig_bounds(bounds) - , m_bounds(bounds) - , m_orig_rank(-1) - , m_payload_bytes(payload_bytes) + : OrigBounds(bounds) + , Bounds(bounds) + , OrigRank(-1) + , PayloadBytes(payload_bytes) { - m_default_value = vtkm::Nan32(); + DefaultValue = vtkm::Nan32(); const int dx = bounds.X.Max - bounds.X.Min + 1; const int dy = bounds.Y.Max - bounds.Y.Min + 1; - m_payloads.resize(dx * dy * m_payload_bytes); - m_depths.resize(dx * dy); + this->Payloads.resize(dx * dy * this->PayloadBytes); + this->Depths.resize(dx * dy); } void InitOriginal(const PayloadImage& other) { - m_orig_bounds = other.m_orig_bounds; - m_bounds = other.m_orig_bounds; - m_payload_bytes = other.m_payload_bytes; - m_default_value = other.m_default_value; + this->OrigBounds = other.OrigBounds; + this->Bounds = other.OrigBounds; + this->PayloadBytes = other.PayloadBytes; + this->DefaultValue = other.DefaultValue; - const int dx = m_bounds.X.Max - m_bounds.X.Min + 1; - const int dy = m_bounds.Y.Max - m_bounds.Y.Min + 1; - m_payloads.resize(dx * dy * m_payload_bytes); - m_depths.resize(dx * dy); + const int dx = this->Bounds.X.Max - this->Bounds.X.Min + 1; + const int dy = this->Bounds.Y.Max - this->Bounds.Y.Min + 1; + this->Payloads.resize(dx * dy * this->PayloadBytes); + this->Depths.resize(dx * dy); - m_orig_rank = -1; + this->OrigRank = -1; } - int GetNumberOfPixels() const { return 
static_cast(m_depths.size()); } + int GetNumberOfPixels() const { return static_cast(this->Depths.size()); } void Init(const unsigned char* payload_buffer, const float* depth_buffer, int width, int height) { - m_bounds.X.Min = 1; - m_bounds.Y.Min = 1; - m_bounds.X.Max = width; - m_bounds.Y.Max = height; - m_orig_bounds = m_bounds; + this->Bounds.X.Min = 1; + this->Bounds.Y.Min = 1; + this->Bounds.X.Max = width; + this->Bounds.Y.Max = height; + this->OrigBounds = this->Bounds; const int size = width * height; - m_payloads.resize(size * m_payload_bytes); - m_depths.resize(size); + this->Payloads.resize(size * this->PayloadBytes); + this->Depths.resize(size); - std::copy(payload_buffer, payload_buffer + size * m_payload_bytes, &m_payloads[0]); + std::copy(payload_buffer, payload_buffer + size * this->PayloadBytes, &this->Payloads[0]); - std::copy(depth_buffer, depth_buffer + size, &m_depths[0]); + std::copy(depth_buffer, depth_buffer + size, &this->Depths[0]); } // @@ -91,30 +91,30 @@ struct VTKM_RENDERING_EXPORT PayloadImage // void SubsetFrom(const PayloadImage& image, const vtkm::Bounds& sub_region) { - m_orig_bounds = image.m_orig_bounds; - m_bounds = sub_region; - m_orig_rank = image.m_orig_rank; - m_payload_bytes = image.m_payload_bytes; + this->OrigBounds = image.OrigBounds; + this->Bounds = sub_region; + this->OrigRank = image.OrigRank; + this->PayloadBytes = image.PayloadBytes; - assert(sub_region.X.Min >= image.m_bounds.X.Min); - assert(sub_region.Y.Min >= image.m_bounds.Y.Min); - assert(sub_region.X.Max <= image.m_bounds.X.Max); - assert(sub_region.Y.Max <= image.m_bounds.Y.Max); + assert(sub_region.X.Min >= image.Bounds.X.Min); + assert(sub_region.Y.Min >= image.Bounds.Y.Min); + assert(sub_region.X.Max <= image.Bounds.X.Max); + assert(sub_region.Y.Max <= image.Bounds.Y.Max); - const int s_dx = m_bounds.X.Max - m_bounds.X.Min + 1; - const int s_dy = m_bounds.Y.Max - m_bounds.Y.Min + 1; + const int s_dx = this->Bounds.X.Max - this->Bounds.X.Min + 1; + const 
int s_dy = this->Bounds.Y.Max - this->Bounds.Y.Min + 1; - const int dx = image.m_bounds.X.Max - image.m_bounds.X.Min + 1; - //const int dy = image.m_bounds.Y.Max - image.m_bounds.Y.Min + 1; + const int dx = image.Bounds.X.Max - image.Bounds.X.Min + 1; + //const int dy = image.Bounds.Y.Max - image.Bounds.Y.Min + 1; - const int start_x = m_bounds.X.Min - image.m_bounds.X.Min; - const int start_y = m_bounds.Y.Min - image.m_bounds.Y.Min; + const int start_x = this->Bounds.X.Min - image.Bounds.X.Min; + const int start_y = this->Bounds.Y.Min - image.Bounds.Y.Min; const int end_y = start_y + s_dy; - size_t buffer_size = s_dx * s_dy * m_payload_bytes; + size_t buffer_size = s_dx * s_dy * this->PayloadBytes; - m_payloads.resize(buffer_size); - m_depths.resize(s_dx * s_dy); + this->Payloads.resize(buffer_size); + this->Depths.resize(s_dx * s_dy); #ifdef VTKH_OPENMP_ENABLED @@ -125,10 +125,10 @@ struct VTKM_RENDERING_EXPORT PayloadImage const int copy_to = (y - start_y) * s_dx; const int copy_from = y * dx + start_x; - std::copy(&image.m_payloads[copy_from * m_payload_bytes], - &image.m_payloads[copy_from * m_payload_bytes] + s_dx * m_payload_bytes, - &m_payloads[copy_to * m_payload_bytes]); - std::copy(&image.m_depths[copy_from], &image.m_depths[copy_from] + s_dx, &m_depths[copy_to]); + std::copy(&image.Payloads[copy_from * this->PayloadBytes], + &image.Payloads[copy_from * this->PayloadBytes] + s_dx * this->PayloadBytes, + &this->Payloads[copy_to * this->PayloadBytes]); + std::copy(&image.Depths[copy_from], &image.Depths[copy_from] + s_dx, &this->Depths[copy_to]); } } @@ -137,19 +137,19 @@ struct VTKM_RENDERING_EXPORT PayloadImage // void SubsetTo(PayloadImage& image) const { - assert(m_bounds.X.Min >= image.m_bounds.X.Min); - assert(m_bounds.Y.Min >= image.m_bounds.Y.Min); - assert(m_bounds.X.Max <= image.m_bounds.X.Max); - assert(m_bounds.Y.Max <= image.m_bounds.Y.Max); + assert(this->Bounds.X.Min >= image.Bounds.X.Min); + assert(this->Bounds.Y.Min >= image.Bounds.Y.Min); 
+ assert(this->Bounds.X.Max <= image.Bounds.X.Max); + assert(this->Bounds.Y.Max <= image.Bounds.Y.Max); - const int s_dx = m_bounds.X.Max - m_bounds.X.Min + 1; - const int s_dy = m_bounds.Y.Max - m_bounds.Y.Min + 1; + const int s_dx = this->Bounds.X.Max - this->Bounds.X.Min + 1; + const int s_dy = this->Bounds.Y.Max - this->Bounds.Y.Min + 1; - const int dx = image.m_bounds.X.Max - image.m_bounds.X.Min + 1; - //const int dy = image.m_bounds.Y.Max - image.m_bounds.Y.Min + 1; + const int dx = image.Bounds.X.Max - image.Bounds.X.Min + 1; + //const int dy = image.Bounds.Y.Max - image.Bounds.Y.Min + 1; - const int start_x = m_bounds.X.Min - image.m_bounds.X.Min; - const int start_y = m_bounds.Y.Min - image.m_bounds.Y.Min; + const int start_x = this->Bounds.X.Min - image.Bounds.X.Min; + const int start_y = this->Bounds.Y.Min - image.Bounds.Y.Min; #ifdef VTKH_OPENMP_ENABLED #pragma omp parallel for @@ -159,44 +159,44 @@ struct VTKM_RENDERING_EXPORT PayloadImage const int copy_to = (y + start_y) * dx + start_x; const int copy_from = y * s_dx; - std::copy(&m_payloads[copy_from * m_payload_bytes], - &m_payloads[copy_from * m_payload_bytes] + s_dx * m_payload_bytes, - &image.m_payloads[copy_to * m_payload_bytes]); + std::copy(&this->Payloads[copy_from * this->PayloadBytes], + &this->Payloads[copy_from * this->PayloadBytes] + s_dx * this->PayloadBytes, + &image.Payloads[copy_to * this->PayloadBytes]); - std::copy(&m_depths[copy_from], &m_depths[copy_from] + s_dx, &image.m_depths[copy_to]); + std::copy(&this->Depths[copy_from], &this->Depths[copy_from] + s_dx, &image.Depths[copy_to]); } } void Swap(PayloadImage& other) { - vtkm::Bounds orig = m_orig_bounds; - vtkm::Bounds bounds = m_bounds; + vtkm::Bounds orig = this->OrigBounds; + vtkm::Bounds bounds = this->Bounds; - m_orig_bounds = other.m_orig_bounds; - m_bounds = other.m_bounds; + this->OrigBounds = other.OrigBounds; + this->Bounds = other.Bounds; - other.m_orig_bounds = orig; - other.m_bounds = bounds; + other.OrigBounds = 
orig; + other.Bounds = bounds; - m_payloads.swap(other.m_payloads); - m_depths.swap(other.m_depths); + this->Payloads.swap(other.Payloads); + this->Depths.swap(other.Depths); } void Clear() { vtkm::Bounds empty; - m_orig_bounds = empty; - m_bounds = empty; - m_payloads.clear(); - m_depths.clear(); + this->OrigBounds = empty; + this->Bounds = empty; + this->Payloads.clear(); + this->Depths.clear(); } std::string ToString() const { std::stringstream ss; - ss << "Total size pixels " << (int)m_depths.size(); - ss << " tile dims: {" << m_bounds.X.Min << "," << m_bounds.Y.Min << "} - "; - ss << "{" << m_bounds.X.Max << "," << m_bounds.Y.Max << "}\n"; + ss << "Total size pixels " << (int)this->Depths.size(); + ss << " tile dims: {" << this->Bounds.X.Min << "," << this->Bounds.Y.Min << "} - "; + ss << "{" << this->Bounds.X.Max << "," << this->Bounds.Y.Max << "}\n"; ; return ss.str(); } diff --git a/vtkm/rendering/compositing/PayloadImageCompositor.h b/vtkm/rendering/compositing/PayloadImageCompositor.h index 8ea3c4f4a..2bfb4dedd 100644 --- a/vtkm/rendering/compositing/PayloadImageCompositor.h +++ b/vtkm/rendering/compositing/PayloadImageCompositor.h @@ -32,36 +32,36 @@ public: void ZBufferComposite(vtkm::rendering::compositing::PayloadImage& front, const vtkm::rendering::compositing::PayloadImage& image) { - if (front.m_payload_bytes != image.m_payload_bytes) + if (front.PayloadBytes != image.PayloadBytes) { std::cout << "very bad\n"; } - assert(front.m_depths.size() == front.m_payloads.size() / front.m_payload_bytes); - assert(front.m_bounds.X.Min == image.m_bounds.X.Min); - assert(front.m_bounds.Y.Min == image.m_bounds.Y.Min); - assert(front.m_bounds.X.Max == image.m_bounds.X.Max); - assert(front.m_bounds.Y.Max == image.m_bounds.Y.Max); + assert(front.Depths.size() == front.Payloads.size() / front.PayloadBytes); + assert(front.Bounds.X.Min == image.Bounds.X.Min); + assert(front.Bounds.Y.Min == image.Bounds.Y.Min); + assert(front.Bounds.X.Max == image.Bounds.X.Max); + 
assert(front.Bounds.Y.Max == image.Bounds.Y.Max); - const int size = static_cast(front.m_depths.size()); - const bool nan_check = image.m_default_value != image.m_default_value; + const int size = static_cast(front.Depths.size()); + const bool nan_check = image.DefaultValue != image.DefaultValue; #ifdef VTKH_OPENMP_ENABLED #pragma omp parallel for #endif for (int i = 0; i < size; ++i) { - const float depth = image.m_depths[i]; - const float fdepth = front.m_depths[i]; + const float depth = image.Depths[i]; + const float fdepth = front.Depths[i]; // this should handle NaNs correctly const bool take_back = fmin(depth, fdepth) == depth; if (take_back) { const int offset = i * 4; - front.m_depths[i] = depth; - const size_t p_offset = i * front.m_payload_bytes; - std::copy(&image.m_payloads[p_offset], - &image.m_payloads[p_offset] + front.m_payload_bytes, - &front.m_payloads[p_offset]); + front.Depths[i] = depth; + const size_t p_offset = i * front.PayloadBytes; + std::copy(&image.Payloads[p_offset], + &image.Payloads[p_offset] + front.PayloadBytes, + &front.Payloads[p_offset]); } } } diff --git a/vtkm/rendering/compositing/RadixKCompositor.cxx b/vtkm/rendering/compositing/RadixKCompositor.cxx index 1bbdc2ace..af1a1f204 100644 --- a/vtkm/rendering/compositing/RadixKCompositor.cxx +++ b/vtkm/rendering/compositing/RadixKCompositor.cxx @@ -84,7 +84,7 @@ void reduce_images(void* b, const int current_dim = partners.dim(round); //create balanced set of ranges for current dim - vtkmdiy::DiscreteBounds image_bounds = vtkh::VTKMBoundsToDIY(image.m_bounds); + vtkmdiy::DiscreteBounds image_bounds = vtkh::VTKMBoundsToDIY(image.Bounds); int range_length = image_bounds.max[current_dim] - image_bounds.min[current_dim]; int base_step = range_length / group_size; int rem = range_length % group_size; @@ -102,7 +102,7 @@ void reduce_images(void* b, assert(count == range_length); std::vector subset_bounds(group_size, - vtkh::VTKMBoundsToDIY(image.m_bounds)); + 
vtkh::VTKMBoundsToDIY(image.Bounds)); int min_pixel = image_bounds.min[current_dim]; for (int i = 0; i < group_size; ++i) { @@ -150,7 +150,7 @@ RadixKCompositor::~RadixKCompositor() {} template void RadixKCompositor::CompositeImpl(vtkmdiy::mpi::communicator& diy_comm, ImageType& image) { - vtkmdiy::DiscreteBounds global_bounds = vtkh::VTKMBoundsToDIY(image.m_orig_bounds); + vtkmdiy::DiscreteBounds global_bounds = vtkh::VTKMBoundsToDIY(image.OrigBounds); // tells diy to use one thread const int num_threads = 1; diff --git a/vtkm/rendering/compositing/vtkm_diy_image_block.h b/vtkm/rendering/compositing/vtkm_diy_image_block.h index 2e9120946..ed074a4a2 100644 --- a/vtkm/rendering/compositing/vtkm_diy_image_block.h +++ b/vtkm/rendering/compositing/vtkm_diy_image_block.h @@ -99,46 +99,46 @@ struct Serialization { static void save(BinaryBuffer& bb, const vtkm::rendering::compositing::PayloadImage& image) { - vtkmdiy::save(bb, image.m_orig_bounds.X.Min); - vtkmdiy::save(bb, image.m_orig_bounds.Y.Min); - vtkmdiy::save(bb, image.m_orig_bounds.Z.Min); - vtkmdiy::save(bb, image.m_orig_bounds.X.Max); - vtkmdiy::save(bb, image.m_orig_bounds.Y.Max); - vtkmdiy::save(bb, image.m_orig_bounds.Z.Max); + vtkmdiy::save(bb, image.OrigBounds.X.Min); + vtkmdiy::save(bb, image.OrigBounds.Y.Min); + vtkmdiy::save(bb, image.OrigBounds.Z.Min); + vtkmdiy::save(bb, image.OrigBounds.X.Max); + vtkmdiy::save(bb, image.OrigBounds.Y.Max); + vtkmdiy::save(bb, image.OrigBounds.Z.Max); - vtkmdiy::save(bb, image.m_bounds.X.Min); - vtkmdiy::save(bb, image.m_bounds.Y.Min); - vtkmdiy::save(bb, image.m_bounds.Z.Min); - vtkmdiy::save(bb, image.m_bounds.X.Max); - vtkmdiy::save(bb, image.m_bounds.Y.Max); - vtkmdiy::save(bb, image.m_bounds.Z.Max); + vtkmdiy::save(bb, image.Bounds.X.Min); + vtkmdiy::save(bb, image.Bounds.Y.Min); + vtkmdiy::save(bb, image.Bounds.Z.Min); + vtkmdiy::save(bb, image.Bounds.X.Max); + vtkmdiy::save(bb, image.Bounds.Y.Max); + vtkmdiy::save(bb, image.Bounds.Z.Max); - vtkmdiy::save(bb, 
image.m_payloads); - vtkmdiy::save(bb, image.m_payload_bytes); - vtkmdiy::save(bb, image.m_depths); - vtkmdiy::save(bb, image.m_orig_rank); + vtkmdiy::save(bb, image.Payloads); + vtkmdiy::save(bb, image.PayloadBytes); + vtkmdiy::save(bb, image.Depths); + vtkmdiy::save(bb, image.OrigRank); } static void load(BinaryBuffer& bb, vtkm::rendering::compositing::PayloadImage& image) { - vtkmdiy::load(bb, image.m_orig_bounds.X.Min); - vtkmdiy::load(bb, image.m_orig_bounds.Y.Min); - vtkmdiy::load(bb, image.m_orig_bounds.Z.Min); - vtkmdiy::load(bb, image.m_orig_bounds.X.Max); - vtkmdiy::load(bb, image.m_orig_bounds.Y.Max); - vtkmdiy::load(bb, image.m_orig_bounds.Z.Max); + vtkmdiy::load(bb, image.OrigBounds.X.Min); + vtkmdiy::load(bb, image.OrigBounds.Y.Min); + vtkmdiy::load(bb, image.OrigBounds.Z.Min); + vtkmdiy::load(bb, image.OrigBounds.X.Max); + vtkmdiy::load(bb, image.OrigBounds.Y.Max); + vtkmdiy::load(bb, image.OrigBounds.Z.Max); - vtkmdiy::load(bb, image.m_bounds.X.Min); - vtkmdiy::load(bb, image.m_bounds.Y.Min); - vtkmdiy::load(bb, image.m_bounds.Z.Min); - vtkmdiy::load(bb, image.m_bounds.X.Max); - vtkmdiy::load(bb, image.m_bounds.Y.Max); - vtkmdiy::load(bb, image.m_bounds.Z.Max); + vtkmdiy::load(bb, image.Bounds.X.Min); + vtkmdiy::load(bb, image.Bounds.Y.Min); + vtkmdiy::load(bb, image.Bounds.Z.Min); + vtkmdiy::load(bb, image.Bounds.X.Max); + vtkmdiy::load(bb, image.Bounds.Y.Max); + vtkmdiy::load(bb, image.Bounds.Z.Max); - vtkmdiy::load(bb, image.m_payloads); - vtkmdiy::load(bb, image.m_payload_bytes); - vtkmdiy::load(bb, image.m_depths); - vtkmdiy::load(bb, image.m_orig_rank); + vtkmdiy::load(bb, image.Payloads); + vtkmdiy::load(bb, image.PayloadBytes); + vtkmdiy::load(bb, image.Depths); + vtkmdiy::load(bb, image.OrigRank); } }; @@ -147,46 +147,46 @@ struct Serialization { static void save(BinaryBuffer& bb, const vtkm::rendering::compositing::Image& image) { - vtkmdiy::save(bb, image.m_orig_bounds.X.Min); - vtkmdiy::save(bb, image.m_orig_bounds.Y.Min); - 
vtkmdiy::save(bb, image.m_orig_bounds.Z.Min); - vtkmdiy::save(bb, image.m_orig_bounds.X.Max); - vtkmdiy::save(bb, image.m_orig_bounds.Y.Max); - vtkmdiy::save(bb, image.m_orig_bounds.Z.Max); + vtkmdiy::save(bb, image.OrigBounds.X.Min); + vtkmdiy::save(bb, image.OrigBounds.Y.Min); + vtkmdiy::save(bb, image.OrigBounds.Z.Min); + vtkmdiy::save(bb, image.OrigBounds.X.Max); + vtkmdiy::save(bb, image.OrigBounds.Y.Max); + vtkmdiy::save(bb, image.OrigBounds.Z.Max); - vtkmdiy::save(bb, image.m_bounds.X.Min); - vtkmdiy::save(bb, image.m_bounds.Y.Min); - vtkmdiy::save(bb, image.m_bounds.Z.Min); - vtkmdiy::save(bb, image.m_bounds.X.Max); - vtkmdiy::save(bb, image.m_bounds.Y.Max); - vtkmdiy::save(bb, image.m_bounds.Z.Max); + vtkmdiy::save(bb, image.Bounds.X.Min); + vtkmdiy::save(bb, image.Bounds.Y.Min); + vtkmdiy::save(bb, image.Bounds.Z.Min); + vtkmdiy::save(bb, image.Bounds.X.Max); + vtkmdiy::save(bb, image.Bounds.Y.Max); + vtkmdiy::save(bb, image.Bounds.Z.Max); - vtkmdiy::save(bb, image.m_pixels); - vtkmdiy::save(bb, image.m_depths); - vtkmdiy::save(bb, image.m_orig_rank); - vtkmdiy::save(bb, image.m_composite_order); + vtkmdiy::save(bb, image.Pixels); + vtkmdiy::save(bb, image.Depths); + vtkmdiy::save(bb, image.OrigRank); + vtkmdiy::save(bb, image.CompositeOrder); } static void load(BinaryBuffer& bb, vtkm::rendering::compositing::Image& image) { - vtkmdiy::load(bb, image.m_orig_bounds.X.Min); - vtkmdiy::load(bb, image.m_orig_bounds.Y.Min); - vtkmdiy::load(bb, image.m_orig_bounds.Z.Min); - vtkmdiy::load(bb, image.m_orig_bounds.X.Max); - vtkmdiy::load(bb, image.m_orig_bounds.Y.Max); - vtkmdiy::load(bb, image.m_orig_bounds.Z.Max); + vtkmdiy::load(bb, image.OrigBounds.X.Min); + vtkmdiy::load(bb, image.OrigBounds.Y.Min); + vtkmdiy::load(bb, image.OrigBounds.Z.Min); + vtkmdiy::load(bb, image.OrigBounds.X.Max); + vtkmdiy::load(bb, image.OrigBounds.Y.Max); + vtkmdiy::load(bb, image.OrigBounds.Z.Max); - vtkmdiy::load(bb, image.m_bounds.X.Min); - vtkmdiy::load(bb, 
image.m_bounds.Y.Min); - vtkmdiy::load(bb, image.m_bounds.Z.Min); - vtkmdiy::load(bb, image.m_bounds.X.Max); - vtkmdiy::load(bb, image.m_bounds.Y.Max); - vtkmdiy::load(bb, image.m_bounds.Z.Max); + vtkmdiy::load(bb, image.Bounds.X.Min); + vtkmdiy::load(bb, image.Bounds.Y.Min); + vtkmdiy::load(bb, image.Bounds.Z.Min); + vtkmdiy::load(bb, image.Bounds.X.Max); + vtkmdiy::load(bb, image.Bounds.Y.Max); + vtkmdiy::load(bb, image.Bounds.Z.Max); - vtkmdiy::load(bb, image.m_pixels); - vtkmdiy::load(bb, image.m_depths); - vtkmdiy::load(bb, image.m_orig_rank); - vtkmdiy::load(bb, image.m_composite_order); + vtkmdiy::load(bb, image.Pixels); + vtkmdiy::load(bb, image.Depths); + vtkmdiy::load(bb, image.OrigRank); + vtkmdiy::load(bb, image.CompositeOrder); } }; } //namespace vtkmdiy From fd613f2c84d770f969074a668324545debf05728 Mon Sep 17 00:00:00 2001 From: Dave Pugmire Date: Thu, 16 Feb 2023 21:55:46 -0500 Subject: [PATCH 07/11] Fixes to rendering. Attempt at getting VR --- vtkm/rendering/View3D.cxx | 78 ++++++++++++++++++- .../testing/UnitTestImageCompositing.cxx | 54 ++++++++++++- 2 files changed, 128 insertions(+), 4 deletions(-) diff --git a/vtkm/rendering/View3D.cxx b/vtkm/rendering/View3D.cxx index bc1998cd4..d1a8167dd 100644 --- a/vtkm/rendering/View3D.cxx +++ b/vtkm/rendering/View3D.cxx @@ -11,6 +11,12 @@ #include #include +#ifdef VTKM_ENABLE_MPI +#include +#include +#include +#endif + namespace vtkm { namespace rendering @@ -49,6 +55,8 @@ void View3D::Paint() return; this->Compositor.SetCompositeMode(vtkm::rendering::compositing::Compositor::Z_BUFFER_SURFACE); + //volume render + this->Compositor.SetCompositeMode(vtkm::rendering::compositing::Compositor::VIS_ORDER_BLEND); /* auto colors = (this->GetCanvas().GetColorBuffer().WritePortal().GetArray())[0][0]; auto depths = (this->GetCanvas().GetDepthBuffer().WritePortal().GetArray()); @@ -82,13 +90,46 @@ void View3D::Paint() void View3D::RenderScreenAnnotations() { - if (this->GetScene().GetNumberOfActors() > 0) + 
vtkm::Range scalarRange; + + int numActors = this->GetScene().GetNumberOfActors(); + if (numActors > 0) + scalarRange = this->GetScene().GetActor(0).GetScalarRange(); + + int totNumActors = numActors; + + /* +#ifdef VTKM_ENABLE_MPI + auto comm = vtkm::cont::EnvironmentTracker::GetCommunicator(); + + vtkm::Float64 minVal = scalarRange.Min, maxVal = scalarRange.Max; + + MPI_Comm mpiComm = vtkmdiy::mpi::mpi_cast(comm.handle()); + int totNumActors = 0; + vtkm::Float64 minVal_res = 0, maxVal_res = 0; + MPI_Reduce(&numActors, &totNumActors, 1, MPI_INT, MPI_SUM, 0, mpiComm); + MPI_Reduce(&minVal, &minVal_res, 1, MPI_DOUBLE, MPI_MIN, 0, mpiComm); + MPI_Reduce(&maxVal, &maxVal_res, 1, MPI_DOUBLE, MPI_MAX, 0, mpiComm); + if (comm.rank() != 0) + return; + + scalarRange.Min = minVal_res; + scalarRange.Max = maxVal_res; +#endif + + std::cout<<"totNumActors= "< Date: Thu, 16 Feb 2023 21:56:18 -0500 Subject: [PATCH 08/11] Add other files..... --- README | 123 ++++ .../rendering/compositing/AbsorptionPartial.h | 104 ++++ vtkm/rendering/compositing/EmissionPartial.h | 141 +++++ .../compositing/PartialCompositor.cxx | 537 ++++++++++++++++++ .../rendering/compositing/PartialCompositor.h | 53 ++ vtkm/rendering/compositing/VolumePartial.h | 91 +++ 6 files changed, 1049 insertions(+) create mode 100644 README create mode 100644 vtkm/rendering/compositing/AbsorptionPartial.h create mode 100644 vtkm/rendering/compositing/EmissionPartial.h create mode 100644 vtkm/rendering/compositing/PartialCompositor.cxx create mode 100644 vtkm/rendering/compositing/PartialCompositor.h create mode 100644 vtkm/rendering/compositing/VolumePartial.h diff --git a/README b/README new file mode 100644 index 000000000..3d45fec03 --- /dev/null +++ b/README @@ -0,0 +1,123 @@ +vtkh: +tests/vtkh/vtk-h_render.cpp + +TEST(vtkh_render, vtkh_bg_color) + + +make data +bounds +camera setup. +vtkh::MakeRender(...) + vtkh::Render: info needed to create a single image. 
N domains = N canvases + camera, bounds, FG/BG colors + +vtkh::RayTracer tracer; +tracer.SetInput(...); + +vtkh::Scene scene; +scene.AddRender(render); +scene.AddRenderer(&tracer); +scene.Render(); + +vtkh::RayTracer : public vtkh::Renderer + --it returns a vtkm::rendering::CanvasRayTracer(width,height) + DoExecute() does the rendering + PostExecute() does the compositing. + + +vtkh::Scene + list of vtkh::Renderer (raytracer, ...) + vector of vtkh::Render (camera, data, fg/bg) + Scene::Render(): + for each renderer: + renderer->Update() does the rendering. + (do opaque, then volume) + RenderWorldAnnotations() + Render::RenderWorldAnnotations() + if rank != 0 then RETURN + Annotator annotator(canvas, camera, m_scene_bounds) + annotator.RenderWorldAnnotations() + RenderScreenAnnotations() + +vtkh::Image. pixels, depth, composite order, ... + + +vtkh::Renderer : public Filter + DoExecute: + for each ds in m_input + m_mapper->RenderCells(ds) + this->Composite(...) + + +vtkh::Compositor + Composite() + calls RadixKCompositor, etc. + + + +vtkh::Render + camera + image name + width/height/bounds + fg/bg colors + +vtkh::RayTracer + SetInput: dataset + SetField: scalar + +vtkh::Scene + AddRender: vtkh::Render + AddRenderer: vtkh::RayTracer + + + +vtkh::Scene.Render(); + + + +======================================================================================= +vtkm: +vtkm::rendering::Scene + AddActor: dataset + Render(mapper, canvas, camera) + +vtkm::rendering::View + camera + canvas + mapper + scene + annotations + Paint() does the rendering + +vtkm::rendering::Canvas + Fg/Bg + color/depth buffers + model/view mtx + + +============================================ +VTK-m: simpler example... ? +examples/demo/Demo.cxx + +vtkm::rendering::Camera (look at, up, clip, etc). 
+vtkm::rendering::Actor (dataset) +vtkm::rendering::CanvasRayTracer (x,y) + +vtkm::rendering::Scene (actor) +vtkm::rendering::View3D view (scene, mapper, canvas, camera, bg) + +view.Paint() (renders the image). + + + + + +========================================================================================= +Comparing classes, etc. +Camera: the same. vtkm::rendering::Camera + +Scene: +vtkm: vector of vtkm::rendering::Actor +vtkh: list/vector of vtkh::Renderer, batchsize, has_volume. + +vtkh::Render approximately equal to vtkm::rendering::View \ No newline at end of file diff --git a/vtkm/rendering/compositing/AbsorptionPartial.h b/vtkm/rendering/compositing/AbsorptionPartial.h new file mode 100644 index 000000000..69fbe3397 --- /dev/null +++ b/vtkm/rendering/compositing/AbsorptionPartial.h @@ -0,0 +1,104 @@ +//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~// +// Copyright (c) 2018, Lawrence Livermore National Security, LLC. +// +// Produced at the Lawrence Livermore National Laboratory +// +// LLNL-CODE-749865 +// +// All rights reserved. +// +// This file is part of Rover. +// +// Please also read rover/LICENSE +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the disclaimer below. +// +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the disclaimer (as noted below) in the +// documentation and/or other materials provided with the distribution. +// +// * Neither the name of the LLNS/LLNL nor the names of its contributors may +// be used to endorse or promote products derived from this software without +// specific prior written permission.
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, +// LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY +// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +// OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING +// IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~// +#ifndef rover_absorption_partial_h +#define rover_absorption_partial_h + +#include + +namespace vtkh +{ + +template +struct AbsorptionPartial +{ + typedef FloatType ValueType; + int m_pixel_id; + double m_depth; + std::vector m_bins; + + AbsorptionPartial() + : m_pixel_id(0) + , m_depth(0.f) + { + } + + void print() {} + + bool operator<(const AbsorptionPartial& other) const + { + // + // In absorption only we can blend the same + // pixel ids in any order + // + return m_pixel_id < other.m_pixel_id; + } + + inline void blend(const AbsorptionPartial& other) + { + const int num_bins = static_cast(m_bins.size()); + assert(num_bins == (int)other.m_bins.size()); + for (int i = 0; i < num_bins; ++i) + { + m_bins[i] *= other.m_bins[i]; + } + } + + static void composite_background(std::vector& partials, + const std::vector& background) + { + const int size = static_cast(partials.size()); + AbsorptionPartial bg; + bg.m_bins = background; +#ifdef VTKH_OPENMP_ENABLED +#pragma omp parallel for +#endif + for (int i = 0; i < size; ++i) + { + 
partials[i].blend(bg); + } + } +}; + +} // namespace rover + + +#endif diff --git a/vtkm/rendering/compositing/EmissionPartial.h b/vtkm/rendering/compositing/EmissionPartial.h new file mode 100644 index 000000000..29d6e796c --- /dev/null +++ b/vtkm/rendering/compositing/EmissionPartial.h @@ -0,0 +1,141 @@ +//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~// +// Copyright (c) 2018, Lawrence Livermore National Security, LLC. +// +// Produced at the Lawrence Livermore National Laboratory +// +// LLNL-CODE-749865 +// +// All rights reserved. +// +// This file is part of Rover. +// +// Please also read rover/LICENSE +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the disclaimer below. +// +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the disclaimer (as noted below) in the +// documentation and/or other materials provided with the distribution. +// +// * Neither the name of the LLNS/LLNL nor the names of its contributors may +// be used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, +// LLC, THE U.S. 
DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY +// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +// OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING +// IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~// +#ifndef rover_emission_partial_h +#define rover_emission_partial_h + +#include + +namespace vtkh +{ + +template +struct EmissionPartial +{ + typedef FloatType ValueType; + + int m_pixel_id; + double m_depth; + std::vector m_bins; + std::vector m_emission_bins; + + EmissionPartial() + : m_pixel_id(0) + , m_depth(0.f) + { + } + + void alter_bin(int bin, FloatType value) + { + m_bins[bin] = value; + m_emission_bins[bin] = value; + } + + void print() + { + std::cout << "Partial id " << m_pixel_id << "\n"; + std::cout << "Absorption : "; + for (int i = 0; i < m_bins.size(); ++i) + { + std::cout << m_bins[i] << " "; + } + std::cout << "\n"; + std::cout << "Emission: "; + for (int i = 0; i < m_bins.size(); ++i) + { + std::cout << m_emission_bins[i] << " "; + } + std::cout << "\n"; + } + + bool operator<(const EmissionPartial& other) const + { + if (m_pixel_id != other.m_pixel_id) + { + return m_pixel_id < other.m_pixel_id; + } + else + { + return m_depth < other.m_depth; + } + } + + inline void blend_absorption(const EmissionPartial& other) + { + const int num_bins = static_cast(m_bins.size()); + assert(num_bins == (int)other.m_bins.size()); + for (int i = 0; i < num_bins; ++i) + { + m_bins[i] *= other.m_bins[i]; + } + } + + inline void blend_emission(EmissionPartial& other) + { + const int num_bins = static_cast(m_bins.size()); + assert(num_bins == 
(int)other.m_bins.size()); + for (int i = 0; i < num_bins; ++i) + { + m_emission_bins[i] *= other.m_bins[i]; + } + } + + inline void add_emission(EmissionPartial& other) + { + const int num_bins = static_cast(m_bins.size()); + assert(num_bins == (int)other.m_bins.size()); + for (int i = 0; i < num_bins; ++i) + { + m_emission_bins[i] += other.m_emission_bins[i]; + } + } + + static void composite_background(std::vector& partials, + const std::vector& background) + { + //for( + } +}; + +} // namespace rover + + +#endif diff --git a/vtkm/rendering/compositing/PartialCompositor.cxx b/vtkm/rendering/compositing/PartialCompositor.cxx new file mode 100644 index 000000000..43f08dc1c --- /dev/null +++ b/vtkm/rendering/compositing/PartialCompositor.cxx @@ -0,0 +1,537 @@ +//============================================================================ +// Copyright (c) Kitware, Inc. +// All rights reserved. +// See LICENSE.txt for details. +// +// This software is distributed WITHOUT ANY WARRANTY; without even +// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR +// PURPOSE. See the above copyright notice for more information. +//============================================================================ + +#include +#include +#include +#include + +#ifdef VTKM_ENABLE_MPI +#include "vtkh_diy_partial_collect.hpp" +#include "vtkh_diy_partial_redistribute.hpp" +#include +#endif + +namespace vtkh +{ +namespace detail +{ +template