diff --git a/vtkm/rendering/View3D.cxx b/vtkm/rendering/View3D.cxx index 940e66ba4..bc1998cd4 100644 --- a/vtkm/rendering/View3D.cxx +++ b/vtkm/rendering/View3D.cxx @@ -71,10 +71,10 @@ void View3D::Paint() const int offset = i * 4; vtkm::Vec4f_32 rgba; for (int j = 0; j < 4; j++) - rgba[j] = static_cast(result.m_pixels[offset + j] / 255.f); + rgba[j] = static_cast(result.Pixels[offset + j] / 255.f); colors.WritePortal().Set(i, rgba); - depths.WritePortal().Set(i, result.m_depths[i]); + depths.WritePortal().Set(i, result.Depths[i]); } } #endif diff --git a/vtkm/rendering/compositing/Compositor.cxx b/vtkm/rendering/compositing/Compositor.cxx index 9dcd0ae8e..23e309a4e 100644 --- a/vtkm/rendering/compositing/Compositor.cxx +++ b/vtkm/rendering/compositing/Compositor.cxx @@ -30,7 +30,7 @@ namespace compositing { Compositor::Compositor() - : m_composite_mode(Z_BUFFER_SURFACE) + : CompositingMode(Z_BUFFER_SURFACE) { } @@ -39,13 +39,13 @@ Compositor::~Compositor() {} void Compositor::SetCompositeMode(CompositeMode composite_mode) { // assure we don't have mixed image types - assert(m_images.size() == 0); - m_composite_mode = composite_mode; + assert(this->Images.size() == 0); + this->CompositingMode = composite_mode; } void Compositor::ClearImages() { - m_images.clear(); + this->Images.clear(); } void Compositor::AddImage(vtkm::rendering::Canvas& canvas) @@ -55,29 +55,29 @@ void Compositor::AddImage(vtkm::rendering::Canvas& canvas) vtkm::Id width = canvas.GetWidth(); vtkm::Id height = canvas.GetHeight(); - assert(m_composite_mode != VIS_ORDER_BLEND); + assert(this->CompositingMode != VIS_ORDER_BLEND); assert(depths != NULL); Image image; - if (m_images.size() == 0) + if (this->Images.size() == 0) { - m_images.push_back(image); - m_images[0].Init(colors, depths, width, height); - //m_images[0].Save("first.png"); + this->Images.push_back(image); + this->Images[0].Init(colors, depths, width, height); + //this->Images[0].Save("first.png"); } - else if (m_composite_mode == Z_BUFFER_SURFACE) + else if (this->CompositingMode == Z_BUFFER_SURFACE) { // // Do local composite and keep a single image // image.Init(colors, depths, width, height); vtkm::rendering::compositing::ImageCompositor compositor; - compositor.ZBufferComposite(m_images[0], image); + compositor.ZBufferComposite(this->Images[0], image); } else { - const size_t image_index = m_images.size(); - m_images.push_back(image); - m_images[image_index].Init(colors, depths, width, height); + const size_t image_index = this->Images.size(); + this->Images.push_back(image); + this->Images[image_index].Init(colors, depths, width, height); } } @@ -98,29 +98,29 @@ void Compositor::AddImage(const unsigned char* color_buffer, const int width, const int height) { - assert(m_composite_mode != VIS_ORDER_BLEND); + assert(this->CompositingMode != VIS_ORDER_BLEND); assert(depth_buffer != NULL); Image image; - if (m_images.size() == 0) + if (this->Images.size() == 0) { - m_images.push_back(image); - m_images[0].Init(color_buffer, depth_buffer, width, height); - //m_images[0].Save("first.png"); + this->Images.push_back(image); + this->Images[0].Init(color_buffer, depth_buffer, width, height); + //this->Images[0].Save("first.png"); } - else if (m_composite_mode == Z_BUFFER_SURFACE) + else if (this->CompositingMode == Z_BUFFER_SURFACE) { // // Do local composite and keep a single image // image.Init(color_buffer, depth_buffer, width, height); vtkm::rendering::compositing::ImageCompositor compositor; - compositor.ZBufferComposite(m_images[0], image); + 
compositor.ZBufferComposite(this->Images[0], image); } else { - const size_t image_index = m_images.size(); - m_images.push_back(image); - m_images[image_index].Init(color_buffer, depth_buffer, width, height); + const size_t image_index = this->Images.size(); + this->Images.push_back(image); + this->Images[image_index].Init(color_buffer, depth_buffer, width, height); } } @@ -129,15 +129,15 @@ void Compositor::AddImage(const float* color_buffer, const int width, const int height) { - assert(m_composite_mode != VIS_ORDER_BLEND); + assert(this->CompositingMode != VIS_ORDER_BLEND); assert(depth_buffer != NULL); Image image; - if (m_images.size() == 0) + if (this->Images.size() == 0) { - m_images.push_back(image); - m_images[0].Init(color_buffer, depth_buffer, width, height); + this->Images.push_back(image); + this->Images[0].Init(color_buffer, depth_buffer, width, height); } - else if (m_composite_mode == Z_BUFFER_SURFACE) + else if (this->CompositingMode == Z_BUFFER_SURFACE) { // // Do local composite and keep a single image @@ -145,13 +145,13 @@ void Compositor::AddImage(const float* color_buffer, image.Init(color_buffer, depth_buffer, width, height); vtkm::rendering::compositing::ImageCompositor compositor; - compositor.ZBufferComposite(m_images[0], image); + compositor.ZBufferComposite(this->Images[0], image); } else { - const size_t image_index = m_images.size(); - m_images.push_back(image); - m_images[image_index].Init(color_buffer, depth_buffer, width, height); + const size_t image_index = this->Images.size(); + this->Images.push_back(image); + this->Images[image_index].Init(color_buffer, depth_buffer, width, height); } } @@ -161,11 +161,11 @@ void Compositor::AddImage(const unsigned char* color_buffer, const int height, const int vis_order) { - assert(m_composite_mode == VIS_ORDER_BLEND); + assert(this->CompositingMode == VIS_ORDER_BLEND); Image image; - const size_t image_index = m_images.size(); - m_images.push_back(image); - m_images[image_index].Init(color_buffer, depth_buffer, width, height, vis_order); + const size_t image_index = this->Images.size(); + this->Images.push_back(image); + this->Images[image_index].Init(color_buffer, depth_buffer, width, height, vis_order); } @@ -175,33 +175,33 @@ void Compositor::AddImage(const float* color_buffer, const int height, const int vis_order) { - assert(m_composite_mode == VIS_ORDER_BLEND); + assert(this->CompositingMode == VIS_ORDER_BLEND); Image image; - const size_t image_index = m_images.size(); - m_images.push_back(image); + const size_t image_index = this->Images.size(); + this->Images.push_back(image); - m_images[image_index].Init(color_buffer, depth_buffer, width, height, vis_order); + this->Images[image_index].Init(color_buffer, depth_buffer, width, height, vis_order); } */ Image Compositor::Composite() { - assert(m_images.size() != 0); + assert(this->Images.size() != 0); - if (m_composite_mode == Z_BUFFER_SURFACE) + if (this->CompositingMode == Z_BUFFER_SURFACE) { CompositeZBufferSurface(); } - else if (m_composite_mode == Z_BUFFER_BLEND) + else if (this->CompositingMode == Z_BUFFER_BLEND) { CompositeZBufferBlend(); } - else if (m_composite_mode == VIS_ORDER_BLEND) + else if (this->CompositingMode == VIS_ORDER_BLEND) { CompositeVisOrder(); } // Make this a param to avoid the copy? 
- return m_images[0]; + return this->Images[0]; } void Compositor::Cleanup() {} @@ -220,9 +220,9 @@ void Compositor::CompositeZBufferSurface() #ifdef VTKM_ENABLE_MPI auto comm = vtkm::cont::EnvironmentTracker::GetCommunicator(); - assert(m_images.size() == 1); + assert(this->Images.size() == 1); RadixKCompositor compositor; - compositor.CompositeSurface(comm, this->m_images[0]); + compositor.CompositeSurface(comm, this->Images[0]); m_log_stream << compositor.GetTimingString(); #endif } @@ -237,12 +237,12 @@ void Compositor::CompositeVisOrder() #ifdef VTKM_ENABLE_MPI auto comm = vtkm::cont::EnvironmentTracker::GetCommunicator(); - assert(m_images.size() != 0); + assert(this->Images.size() != 0); vtkm::rendering::compositing::DirectSendCompositor compositor; - compositor.CompositeVolume(comm, this->m_images); + compositor.CompositeVolume(comm, this->Images); #else vtkm::rendering::compositing::ImageCompositor compositor; - compositor.OrderedComposite(m_images); + compositor.OrderedComposite(this->Images); #endif } diff --git a/vtkm/rendering/compositing/Compositor.h b/vtkm/rendering/compositing/Compositor.h index 29bf00f04..8ce85e853 100644 --- a/vtkm/rendering/compositing/Compositor.h +++ b/vtkm/rendering/compositing/Compositor.h @@ -97,8 +97,8 @@ protected: virtual void CompositeVisOrder(); std::stringstream m_log_stream; - CompositeMode m_composite_mode; - std::vector m_images; + CompositeMode CompositingMode; + std::vector Images; }; } diff --git a/vtkm/rendering/compositing/DirectSendCompositor.cxx b/vtkm/rendering/compositing/DirectSendCompositor.cxx index f6638d41a..344f92891 100644 --- a/vtkm/rendering/compositing/DirectSendCompositor.cxx +++ b/vtkm/rendering/compositing/DirectSendCompositor.cxx @@ -80,7 +80,7 @@ struct Redistribute proxy.enqueue(it->first, it->second); } } // if - else if (block->m_images.at(0).m_composite_order != -1) + else if (block->m_images.at(0).CompositeOrder != -1) { // blend images according to vis order std::vector images; @@ -103,8 +103,8 @@ struct Redistribute block->m_output.Swap(images[0]); } // else if - else if (block->m_images.at(0).m_composite_order == -1 && - block->m_images.at(0).HasTransparency()) + else if (block->m_images.at(0).CompositeOrder == -1 && + block->m_images.at(0).GetHasTransparency()) { std::vector images; for (int i = 0; i < proxy.in_link().size(); ++i) @@ -140,7 +140,7 @@ DirectSendCompositor::~DirectSendCompositor() {} void DirectSendCompositor::CompositeVolume(vtkmdiy::mpi::communicator& diy_comm, std::vector& images) { - vtkmdiy::DiscreteBounds global_bounds = vtkh::VTKMBoundsToDIY(images.at(0).m_orig_bounds); + vtkmdiy::DiscreteBounds global_bounds = vtkh::VTKMBoundsToDIY(images.at(0).OrigBounds); const int num_threads = 1; const int num_blocks = diy_comm.size(); diff --git a/vtkm/rendering/compositing/Image.cxx b/vtkm/rendering/compositing/Image.cxx index 8e1c838b5..02fa20f93 100644 --- a/vtkm/rendering/compositing/Image.cxx +++ b/vtkm/rendering/compositing/Image.cxx @@ -12,20 +12,20 @@ namespace compositing void Image::Save(const std::string& name, const std::vector& comments) { - PNGEncoder encoder; - encoder.Encode(&m_pixels[0], - m_bounds.X.Max - m_bounds.X.Min + 1, - m_bounds.Y.Max - m_bounds.Y.Min + 1, + vtkm::rendering::compositing::PNGEncoder encoder; + encoder.Encode(&this->Pixels[0], + this->Bounds.X.Max - this->Bounds.X.Min + 1, + this->Bounds.Y.Max - this->Bounds.Y.Min + 1, comments); encoder.Save(name); } void Image::Save(const std::string& name, const std::vector& comments) const { - PNGEncoder encoder; - 
encoder.Encode(&m_pixels[0], - m_bounds.X.Max - m_bounds.X.Min + 1, - m_bounds.Y.Max - m_bounds.Y.Min + 1, + vtkm::rendering::compositing::PNGEncoder encoder; + encoder.Encode(&this->Pixels[0], + this->Bounds.X.Max - this->Bounds.X.Min + 1, + this->Bounds.Y.Max - this->Bounds.Y.Min + 1, comments); encoder.Save(name); } diff --git a/vtkm/rendering/compositing/Image.h b/vtkm/rendering/compositing/Image.h index f190e2623..523069b0f 100644 --- a/vtkm/rendering/compositing/Image.h +++ b/vtkm/rendering/compositing/Image.h @@ -28,60 +28,60 @@ struct VTKM_RENDERING_EXPORT Image { // The image bounds are indicated by a grid starting at // 1-width and 1-height. Actual width would be calculated - // m_bounds.X.Max - m_bounds.X.Min + 1 + // Bounds.X.Max - Bounds.X.Min + 1 // 1024 - 1 + 1 = 1024 - vtkm::Bounds m_orig_bounds; - vtkm::Bounds m_bounds; - std::vector m_pixels; - std::vector m_depths; - int m_orig_rank; - bool m_has_transparency; - int m_composite_order; + vtkm::Bounds OrigBounds; + vtkm::Bounds Bounds; + std::vector Pixels; + std::vector Depths; + int OrigRank; + bool HasTransparency; + int CompositeOrder; Image() - : m_orig_rank(-1) - , m_has_transparency(false) - , m_composite_order(-1) + : OrigRank(-1) + , HasTransparency(false) + , CompositeOrder(-1) { } Image(const vtkm::Bounds& bounds) - : m_orig_bounds(bounds) - , m_bounds(bounds) - , m_orig_rank(-1) - , m_has_transparency(false) - , m_composite_order(-1) + : OrigBounds(bounds) + , Bounds(bounds) + , OrigRank(-1) + , HasTransparency(false) + , CompositeOrder(-1) { const int dx = bounds.X.Max - bounds.X.Min + 1; const int dy = bounds.Y.Max - bounds.Y.Min + 1; - m_pixels.resize(dx * dy * 4); - m_depths.resize(dx * dy); + this->Pixels.resize(dx * dy * 4); + this->Depths.resize(dx * dy); } // init this image based on the original bounds // of the other image void InitOriginal(const Image& other) { - m_orig_bounds = other.m_orig_bounds; - m_bounds = other.m_orig_bounds; + this->OrigBounds = other.OrigBounds; + this->Bounds = other.OrigBounds; - const int dx = m_bounds.X.Max - m_bounds.X.Min + 1; - const int dy = m_bounds.Y.Max - m_bounds.Y.Min + 1; - m_pixels.resize(dx * dy * 4); - m_depths.resize(dx * dy); + const int dx = this->Bounds.X.Max - this->Bounds.X.Min + 1; + const int dy = this->Bounds.Y.Max - this->Bounds.Y.Min + 1; + this->Pixels.resize(dx * dy * 4); + this->Depths.resize(dx * dy); - m_orig_rank = -1; - m_has_transparency = false; - m_composite_order = -1; + this->OrigRank = -1; + this->HasTransparency = false; + this->CompositeOrder = -1; } - int GetNumberOfPixels() const { return static_cast(m_pixels.size() / 4); } + int GetNumberOfPixels() const { return static_cast(this->Pixels.size() / 4); } - void SetHasTransparency(bool has_transparency) { m_has_transparency = has_transparency; } + void SetHasTransparency(bool has_transparency) { this->HasTransparency = has_transparency; } - bool HasTransparency() { return m_has_transparency; } + bool GetHasTransparency() { return this->HasTransparency; } void Init(const float* color_buffer, const float* depth_buffer, @@ -89,15 +89,15 @@ struct VTKM_RENDERING_EXPORT Image vtkm::Id height, int composite_order = -1) { - m_composite_order = composite_order; - m_bounds.X.Min = 1; - m_bounds.Y.Min = 1; - m_bounds.X.Max = width; - m_bounds.Y.Max = height; - m_orig_bounds = m_bounds; + this->CompositeOrder = composite_order; + this->Bounds.X.Min = 1; + this->Bounds.Y.Min = 1; + this->Bounds.X.Max = width; + this->Bounds.Y.Max = height; + this->OrigBounds = this->Bounds; const int size = 
width * height; - m_pixels.resize(size * 4); - m_depths.resize(size); + this->Pixels.resize(size * 4); + this->Depths.resize(size); #ifdef VTKH_OPENMP_ENABLED #pragma omp parallel for @@ -105,16 +105,16 @@ struct VTKM_RENDERING_EXPORT Image for (int i = 0; i < size; ++i) { const int offset = i * 4; - m_pixels[offset + 0] = static_cast(color_buffer[offset + 0] * 255.f); - m_pixels[offset + 1] = static_cast(color_buffer[offset + 1] * 255.f); - m_pixels[offset + 2] = static_cast(color_buffer[offset + 2] * 255.f); - m_pixels[offset + 3] = static_cast(color_buffer[offset + 3] * 255.f); + this->Pixels[offset + 0] = static_cast(color_buffer[offset + 0] * 255.f); + this->Pixels[offset + 1] = static_cast(color_buffer[offset + 1] * 255.f); + this->Pixels[offset + 2] = static_cast(color_buffer[offset + 2] * 255.f); + this->Pixels[offset + 3] = static_cast(color_buffer[offset + 3] * 255.f); float depth = depth_buffer[i]; //make sure we can do a single comparison on depth //deal with negative depth values //TODO: This may not be the best way depth = depth < 0 ? abs(depth) : depth; - m_depths[i] = depth; + this->Depths[i] = depth; } } @@ -124,18 +124,18 @@ struct VTKM_RENDERING_EXPORT Image vtkm::Id height, int composite_order = -1) { - m_composite_order = composite_order; - m_bounds.X.Min = 1; - m_bounds.Y.Min = 1; - m_bounds.X.Max = width; - m_bounds.Y.Max = height; - m_orig_bounds = m_bounds; + this->CompositeOrder = composite_order; + this->Bounds.X.Min = 1; + this->Bounds.Y.Min = 1; + this->Bounds.X.Max = width; + this->Bounds.Y.Max = height; + this->OrigBounds = this->Bounds; const int size = width * height; - m_pixels.resize(size * 4); - m_depths.resize(size); + this->Pixels.resize(size * 4); + this->Depths.resize(size); - std::copy(color_buffer, color_buffer + size * 4, &m_pixels[0]); + std::copy(color_buffer, color_buffer + size * 4, &this->Pixels[0]); #ifdef VTKH_OPENMP_ENABLED #pragma omp parallel for @@ -145,7 +145,7 @@ struct VTKM_RENDERING_EXPORT Image float depth = depth_buffer[i]; //make sure we can do a single comparison on depth depth = depth < 0 ? 
2.f : depth; - m_depths[i] = depth; + this->Depths[i] = depth; } // for } @@ -153,7 +153,7 @@ struct VTKM_RENDERING_EXPORT Image void CompositeBackground(const float* color) { - const int size = static_cast(m_pixels.size() / 4); + const int size = static_cast(this->Pixels.size() / 4); unsigned char bg_color[4]; for (int i = 0; i < 4; ++i) { @@ -166,12 +166,12 @@ struct VTKM_RENDERING_EXPORT Image for (int i = 0; i < size; ++i) { const int offset = i * 4; - unsigned int alpha = static_cast(m_pixels[offset + 3]); + unsigned int alpha = static_cast(this->Pixels[offset + 3]); const float opacity = (255 - alpha); - m_pixels[offset + 0] += static_cast(opacity * bg_color[0] / 255); - m_pixels[offset + 1] += static_cast(opacity * bg_color[1] / 255); - m_pixels[offset + 2] += static_cast(opacity * bg_color[2] / 255); - m_pixels[offset + 3] += static_cast(opacity * bg_color[3] / 255); + this->Pixels[offset + 0] += static_cast(opacity * bg_color[0] / 255); + this->Pixels[offset + 1] += static_cast(opacity * bg_color[1] / 255); + this->Pixels[offset + 2] += static_cast(opacity * bg_color[2] / 255); + this->Pixels[offset + 3] += static_cast(opacity * bg_color[3] / 255); } } // @@ -179,28 +179,28 @@ struct VTKM_RENDERING_EXPORT Image // void SubsetFrom(const Image& image, const vtkm::Bounds& sub_region) { - m_orig_bounds = image.m_orig_bounds; - m_bounds = sub_region; - m_orig_rank = image.m_orig_rank; - m_composite_order = image.m_composite_order; + this->OrigBounds = image.OrigBounds; + this->Bounds = sub_region; + this->OrigRank = image.OrigRank; + this->CompositeOrder = image.CompositeOrder; - assert(sub_region.X.Min >= image.m_bounds.X.Min); - assert(sub_region.Y.Min >= image.m_bounds.Y.Min); - assert(sub_region.X.Max <= image.m_bounds.X.Max); - assert(sub_region.Y.Max <= image.m_bounds.Y.Max); + assert(sub_region.X.Min >= image.Bounds.X.Min); + assert(sub_region.Y.Min >= image.Bounds.Y.Min); + assert(sub_region.X.Max <= image.Bounds.X.Max); + assert(sub_region.Y.Max <= image.Bounds.Y.Max); - const int s_dx = m_bounds.X.Max - m_bounds.X.Min + 1; - const int s_dy = m_bounds.Y.Max - m_bounds.Y.Min + 1; + const int s_dx = this->Bounds.X.Max - this->Bounds.X.Min + 1; + const int s_dy = this->Bounds.Y.Max - this->Bounds.Y.Min + 1; - const int dx = image.m_bounds.X.Max - image.m_bounds.X.Min + 1; - //const int dy = image.m_bounds.Y.Max - image.m_bounds.Y.Min + 1; + const int dx = image.Bounds.X.Max - image.Bounds.X.Min + 1; + //const int dy = image.Bounds.Y.Max - image.Bounds.Y.Min + 1; - const int start_x = m_bounds.X.Min - image.m_bounds.X.Min; - const int start_y = m_bounds.Y.Min - image.m_bounds.Y.Min; + const int start_x = this->Bounds.X.Min - image.Bounds.X.Min; + const int start_y = this->Bounds.Y.Min - image.Bounds.Y.Min; const int end_y = start_y + s_dy; - m_pixels.resize(s_dx * s_dy * 4); - m_depths.resize(s_dx * s_dy); + this->Pixels.resize(s_dx * s_dy * 4); + this->Depths.resize(s_dx * s_dy); @@ -212,10 +212,10 @@ struct VTKM_RENDERING_EXPORT Image const int copy_to = (y - start_y) * s_dx; const int copy_from = y * dx + start_x; - std::copy(&image.m_pixels[copy_from * 4], - &image.m_pixels[copy_from * 4] + s_dx * 4, - &m_pixels[copy_to * 4]); - std::copy(&image.m_depths[copy_from], &image.m_depths[copy_from] + s_dx, &m_depths[copy_to]); + std::copy(&image.Pixels[copy_from * 4], + &image.Pixels[copy_from * 4] + s_dx * 4, + &this->Pixels[copy_to * 4]); + std::copy(&image.Depths[copy_from], &image.Depths[copy_from] + s_dx, &this->Depths[copy_to]); } } @@ -230,17 +230,17 @@ struct 
VTKM_RENDERING_EXPORT Image int index = color % 3; c[index] = 255 - color * 11; ; - const int size = static_cast(m_pixels.size()); + const int size = static_cast(this->Pixels.size()); for (int i = 0; i < size; ++i) { - float d = m_depths[i / 4]; + float d = this->Depths[i / 4]; if (d > 0 && d < 1) { - m_pixels[i] = c[i % 4]; + this->Pixels[i] = c[i % 4]; } else { - m_pixels[i] = 155; + this->Pixels[i] = 155; } } } @@ -249,20 +249,20 @@ struct VTKM_RENDERING_EXPORT Image // void SubsetTo(Image& image) const { - image.m_composite_order = m_composite_order; - assert(m_bounds.X.Min >= image.m_bounds.X.Min); - assert(m_bounds.Y.Min >= image.m_bounds.Y.Min); - assert(m_bounds.X.Max <= image.m_bounds.X.Max); - assert(m_bounds.Y.Max <= image.m_bounds.Y.Max); + image.CompositeOrder = this->CompositeOrder; + assert(this->Bounds.X.Min >= image.Bounds.X.Min); + assert(this->Bounds.Y.Min >= image.Bounds.Y.Min); + assert(this->Bounds.X.Max <= image.Bounds.X.Max); + assert(this->Bounds.Y.Max <= image.Bounds.Y.Max); - const int s_dx = m_bounds.X.Max - m_bounds.X.Min + 1; - const int s_dy = m_bounds.Y.Max - m_bounds.Y.Min + 1; + const int s_dx = this->Bounds.X.Max - this->Bounds.X.Min + 1; + const int s_dy = this->Bounds.Y.Max - this->Bounds.Y.Min + 1; - const int dx = image.m_bounds.X.Max - image.m_bounds.X.Min + 1; - //const int dy = image.m_bounds.Y.Max - image.m_bounds.Y.Min + 1; + const int dx = image.Bounds.X.Max - image.Bounds.X.Min + 1; + //const int dy = image.Bounds.Y.Max - image.Bounds.Y.Min + 1; - const int start_x = m_bounds.X.Min - image.m_bounds.X.Min; - const int start_y = m_bounds.Y.Min - image.m_bounds.Y.Min; + const int start_x = this->Bounds.X.Min - image.Bounds.X.Min; + const int start_y = this->Bounds.Y.Min - image.Bounds.Y.Min; #ifdef VTKH_OPENMP_ENABLED #pragma omp parallel for @@ -272,44 +272,44 @@ struct VTKM_RENDERING_EXPORT Image const int copy_to = (y + start_y) * dx + start_x; const int copy_from = y * s_dx; - std::copy(&m_pixels[copy_from * 4], - &m_pixels[copy_from * 4] + s_dx * 4, - &image.m_pixels[copy_to * 4]); + std::copy(&this->Pixels[copy_from * 4], + &this->Pixels[copy_from * 4] + s_dx * 4, + &image.Pixels[copy_to * 4]); - std::copy(&m_depths[copy_from], &m_depths[copy_from] + s_dx, &image.m_depths[copy_to]); + std::copy(&this->Depths[copy_from], &this->Depths[copy_from] + s_dx, &image.Depths[copy_to]); } } void Swap(Image& other) { - vtkm::Bounds orig = m_orig_bounds; - vtkm::Bounds bounds = m_bounds; + vtkm::Bounds orig = this->OrigBounds; + vtkm::Bounds bounds = this->Bounds; - m_orig_bounds = other.m_orig_bounds; - m_bounds = other.m_bounds; + this->OrigBounds = other.OrigBounds; + this->Bounds = other.Bounds; - other.m_orig_bounds = orig; - other.m_bounds = bounds; + other.OrigBounds = orig; + other.Bounds = bounds; - m_pixels.swap(other.m_pixels); - m_depths.swap(other.m_depths); + this->Pixels.swap(other.Pixels); + this->Depths.swap(other.Depths); } void Clear() { vtkm::Bounds empty; - m_orig_bounds = empty; - m_bounds = empty; - m_pixels.clear(); - m_depths.clear(); + this->OrigBounds = empty; + this->Bounds = empty; + this->Pixels.clear(); + this->Depths.clear(); } std::string ToString() const { std::stringstream ss; - ss << "Total size pixels " << (int)m_pixels.size() / 4; - ss << " tile dims: {" << m_bounds.X.Min << "," << m_bounds.Y.Min << "} - "; - ss << "{" << m_bounds.X.Max << "," << m_bounds.Y.Max << "}\n"; + ss << "Total size pixels " << (int)this->Pixels.size() / 4; + ss << " tile dims: {" << this->Bounds.X.Min << "," << this->Bounds.Y.Min << "} - "; 
+ ss << "{" << this->Bounds.X.Max << "," << this->Bounds.Y.Max << "}\n"; ; return ss.str(); } @@ -322,7 +322,7 @@ struct CompositeOrderSort { inline bool operator()(const Image& lhs, const Image& rhs) const { - return lhs.m_composite_order < rhs.m_composite_order; + return lhs.CompositeOrder < rhs.CompositeOrder; } }; diff --git a/vtkm/rendering/compositing/ImageCompositor.h b/vtkm/rendering/compositing/ImageCompositor.h index 40fdf176c..ad8aff7dc 100644 --- a/vtkm/rendering/compositing/ImageCompositor.h +++ b/vtkm/rendering/compositing/ImageCompositor.h @@ -28,12 +28,11 @@ class VTKM_RENDERING_EXPORT ImageCompositor public: void Blend(vtkm::rendering::compositing::Image& front, vtkm::rendering::compositing::Image& back) { - - assert(front.m_bounds.X.Min == back.m_bounds.X.Min); - assert(front.m_bounds.Y.Min == back.m_bounds.Y.Min); - assert(front.m_bounds.X.Max == back.m_bounds.X.Max); - assert(front.m_bounds.Y.Max == back.m_bounds.Y.Max); - const int size = static_cast(front.m_pixels.size() / 4); + assert(front.Bounds.X.Min == back.Bounds.X.Min); + assert(front.Bounds.Y.Min == back.Bounds.Y.Min); + assert(front.Bounds.X.Max == back.Bounds.X.Max); + assert(front.Bounds.Y.Max == back.Bounds.Y.Max); + const int size = static_cast(front.Pixels.size() / 4); #ifdef VTKH_OPENMP_ENABLED #pragma omp parallel for @@ -41,52 +40,52 @@ public: for (int i = 0; i < size; ++i) { const int offset = i * 4; - unsigned int alpha = front.m_pixels[offset + 3]; + unsigned int alpha = front.Pixels[offset + 3]; const unsigned int opacity = 255 - alpha; - front.m_pixels[offset + 0] += - static_cast(opacity * back.m_pixels[offset + 0] / 255); - front.m_pixels[offset + 1] += - static_cast(opacity * back.m_pixels[offset + 1] / 255); - front.m_pixels[offset + 2] += - static_cast(opacity * back.m_pixels[offset + 2] / 255); - front.m_pixels[offset + 3] += - static_cast(opacity * back.m_pixels[offset + 3] / 255); + front.Pixels[offset + 0] += + static_cast(opacity * back.Pixels[offset + 0] / 255); + front.Pixels[offset + 1] += + static_cast(opacity * back.Pixels[offset + 1] / 255); + front.Pixels[offset + 2] += + static_cast(opacity * back.Pixels[offset + 2] / 255); + front.Pixels[offset + 3] += + static_cast(opacity * back.Pixels[offset + 3] / 255); - float d1 = std::min(front.m_depths[i], 1.001f); - float d2 = std::min(back.m_depths[i], 1.001f); + float d1 = std::min(front.Depths[i], 1.001f); + float d2 = std::min(back.Depths[i], 1.001f); float depth = std::min(d1, d2); - front.m_depths[i] = depth; + front.Depths[i] = depth; } } void ZBufferComposite(vtkm::rendering::compositing::Image& front, const vtkm::rendering::compositing::Image& image) { - assert(front.m_depths.size() == front.m_pixels.size() / 4); - assert(front.m_bounds.X.Min == image.m_bounds.X.Min); - assert(front.m_bounds.Y.Min == image.m_bounds.Y.Min); - assert(front.m_bounds.X.Max == image.m_bounds.X.Max); - assert(front.m_bounds.Y.Max == image.m_bounds.Y.Max); + assert(front.Depths.size() == front.Pixels.size() / 4); + assert(front.Bounds.X.Min == image.Bounds.X.Min); + assert(front.Bounds.Y.Min == image.Bounds.Y.Min); + assert(front.Bounds.X.Max == image.Bounds.X.Max); + assert(front.Bounds.Y.Max == image.Bounds.Y.Max); - const int size = static_cast(front.m_depths.size()); + const int size = static_cast(front.Depths.size()); #ifdef VTKH_OPENMP_ENABLED #pragma omp parallel for #endif for (int i = 0; i < size; ++i) { - const float depth = image.m_depths[i]; - if (depth > 1.f || front.m_depths[i] < depth) + const float depth = image.Depths[i]; + if 
(depth > 1.f || front.Depths[i] < depth) { continue; } const int offset = i * 4; - front.m_depths[i] = abs(depth); - front.m_pixels[offset + 0] = image.m_pixels[offset + 0]; - front.m_pixels[offset + 1] = image.m_pixels[offset + 1]; - front.m_pixels[offset + 2] = image.m_pixels[offset + 2]; - front.m_pixels[offset + 3] = image.m_pixels[offset + 3]; + front.Depths[i] = abs(depth); + front.Pixels[offset + 0] = image.Pixels[offset + 0]; + front.Pixels[offset + 1] = image.Pixels[offset + 1]; + front.Pixels[offset + 2] = image.Pixels[offset + 2]; + front.Pixels[offset + 3] = image.Pixels[offset + 3]; } } @@ -111,19 +110,19 @@ public: struct Pixel { - unsigned char m_color[4]; - float m_depth; - int m_pixel_id; // local (sub-image) pixels id + unsigned char Color[4]; + float Depth; + int PixelId; // local (sub-image) pixels id bool operator<(const Pixel& other) const { - if (m_pixel_id != other.m_pixel_id) + if (this->PixelId != other.PixelId) { - return m_pixel_id < other.m_pixel_id; + return this->PixelId < other.PixelId; } else { - return m_depth < other.m_depth; + return this->Depth < other.Depth; } } }; @@ -147,12 +146,12 @@ public: for (int j = 0; j < image_size; ++j) { const int image_offset = j * 4; - pixels[offset + j].m_color[0] = images[i].m_pixels[image_offset + 0]; - pixels[offset + j].m_color[1] = images[i].m_pixels[image_offset + 1]; - pixels[offset + j].m_color[2] = images[i].m_pixels[image_offset + 2]; - pixels[offset + j].m_color[3] = images[i].m_pixels[image_offset + 3]; - pixels[offset + j].m_depth = images[i].m_depths[j]; - pixels[offset + j].m_pixel_id = j; + pixels[offset + j].Color[0] = images[i].Pixels[image_offset + 0]; + pixels[offset + j].Color[1] = images[i].Pixels[image_offset + 1]; + pixels[offset + j].Color[2] = images[i].Pixels[image_offset + 2]; + pixels[offset + j].Color[3] = images[i].Pixels[image_offset + 3]; + pixels[offset + j].Depth = images[i].Depths[j]; + pixels[offset + j].PixelId = j; } // for pixels } // for images } @@ -174,10 +173,10 @@ public: } // check to see if that worked - int pixel_id_0 = pixels[0].m_pixel_id; + int pixel_id_0 = pixels[0].PixelId; for (int i = 1; i < num_images; ++i) { - assert(pixel_id_0 == pixels[i].m_pixel_id); + assert(pixel_id_0 == pixels[i].PixelId); } @@ -190,27 +189,23 @@ public: Pixel pixel = pixels[index]; for (int j = 1; j < num_images; ++j) { - if (pixel.m_color[3] == 255 || pixel.m_depth > 1.f) + if (pixel.Color[3] == 255 || pixel.Depth > 1.f) { break; } - unsigned int alpha = pixel.m_color[3]; + unsigned int alpha = pixel.Color[3]; const unsigned int opacity = 255 - alpha; - pixel.m_color[0] += - static_cast(opacity * pixels[index + j].m_color[0] / 255); - pixel.m_color[1] += - static_cast(opacity * pixels[index + j].m_color[1] / 255); - pixel.m_color[2] += - static_cast(opacity * pixels[index + j].m_color[2] / 255); - pixel.m_color[3] += - static_cast(opacity * pixels[index + j].m_color[3] / 255); - pixel.m_depth = pixels[index + j].m_depth; + pixel.Color[0] += static_cast(opacity * pixels[index + j].Color[0] / 255); + pixel.Color[1] += static_cast(opacity * pixels[index + j].Color[1] / 255); + pixel.Color[2] += static_cast(opacity * pixels[index + j].Color[2] / 255); + pixel.Color[3] += static_cast(opacity * pixels[index + j].Color[3] / 255); + pixel.Depth = pixels[index + j].Depth; } // for each image - images[0].m_pixels[i * 4 + 0] = pixel.m_color[0]; - images[0].m_pixels[i * 4 + 1] = pixel.m_color[1]; - images[0].m_pixels[i * 4 + 2] = pixel.m_color[2]; - images[0].m_pixels[i * 4 + 3] = pixel.m_color[3]; - 
images[0].m_depths[i] = pixel.m_depth; + images[0].Pixels[i * 4 + 0] = pixel.Color[0]; + images[0].Pixels[i * 4 + 1] = pixel.Color[1]; + images[0].Pixels[i * 4 + 2] = pixel.Color[2]; + images[0].Pixels[i * 4 + 3] = pixel.Color[3]; + images[0].Depths[i] = pixel.Depth; } // for each pixel } }; diff --git a/vtkm/rendering/compositing/PayloadImage.cxx b/vtkm/rendering/compositing/PayloadImage.cxx index 10ff0f89d..9fb99f715 100644 --- a/vtkm/rendering/compositing/PayloadImage.cxx +++ b/vtkm/rendering/compositing/PayloadImage.cxx @@ -21,9 +21,9 @@ namespace compositing void PayloadImage::Save(const std::string& name, const std::vector& comments) { PNGEncoder encoder; - encoder.Encode(&m_payloads[0], - m_bounds.X.Max - m_bounds.X.Min + 1, - m_bounds.Y.Max - m_bounds.Y.Min + 1, + encoder.Encode(&this->Payloads[0], + this->Bounds.X.Max - this->Bounds.X.Min + 1, + this->Bounds.Y.Max - this->Bounds.Y.Min + 1, comments); encoder.Save(name); } diff --git a/vtkm/rendering/compositing/PayloadImage.h b/vtkm/rendering/compositing/PayloadImage.h index 501e82715..5bbbd87a6 100644 --- a/vtkm/rendering/compositing/PayloadImage.h +++ b/vtkm/rendering/compositing/PayloadImage.h @@ -28,62 +28,62 @@ struct VTKM_RENDERING_EXPORT PayloadImage { // The image bounds are indicated by a grid starting at // 1-width and 1-height. Actual width would be calculated - // m_bounds.X.Max - m_bounds.X.Min + 1 + // Bounds.X.Max - Bounds.X.Min + 1 // 1024 - 1 + 1 = 1024 - vtkm::Bounds m_orig_bounds; - vtkm::Bounds m_bounds; - std::vector m_payloads; - std::vector m_depths; - int m_orig_rank; - int m_payload_bytes; // Size of the payload in bytes - float m_default_value; + vtkm::Bounds OrigBounds; + vtkm::Bounds Bounds; + std::vector Payloads; + std::vector Depths; + int OrigRank; + int PayloadBytes; // Size of the payload in bytes + float DefaultValue; PayloadImage() {} PayloadImage(const vtkm::Bounds& bounds, const int payload_bytes) - : m_orig_bounds(bounds) - , m_bounds(bounds) - , m_orig_rank(-1) - , m_payload_bytes(payload_bytes) + : OrigBounds(bounds) + , Bounds(bounds) + , OrigRank(-1) + , PayloadBytes(payload_bytes) { - m_default_value = vtkm::Nan32(); + DefaultValue = vtkm::Nan32(); const int dx = bounds.X.Max - bounds.X.Min + 1; const int dy = bounds.Y.Max - bounds.Y.Min + 1; - m_payloads.resize(dx * dy * m_payload_bytes); - m_depths.resize(dx * dy); + this->Payloads.resize(dx * dy * this->PayloadBytes); + this->Depths.resize(dx * dy); } void InitOriginal(const PayloadImage& other) { - m_orig_bounds = other.m_orig_bounds; - m_bounds = other.m_orig_bounds; - m_payload_bytes = other.m_payload_bytes; - m_default_value = other.m_default_value; + this->OrigBounds = other.OrigBounds; + this->Bounds = other.OrigBounds; + this->PayloadBytes = other.PayloadBytes; + this->DefaultValue = other.DefaultValue; - const int dx = m_bounds.X.Max - m_bounds.X.Min + 1; - const int dy = m_bounds.Y.Max - m_bounds.Y.Min + 1; - m_payloads.resize(dx * dy * m_payload_bytes); - m_depths.resize(dx * dy); + const int dx = this->Bounds.X.Max - this->Bounds.X.Min + 1; + const int dy = this->Bounds.Y.Max - this->Bounds.Y.Min + 1; + this->Payloads.resize(dx * dy * this->PayloadBytes); + this->Depths.resize(dx * dy); - m_orig_rank = -1; + this->OrigRank = -1; } - int GetNumberOfPixels() const { return static_cast(m_depths.size()); } + int GetNumberOfPixels() const { return static_cast(this->Depths.size()); } void Init(const unsigned char* payload_buffer, const float* depth_buffer, int width, int height) { - m_bounds.X.Min = 1; - m_bounds.Y.Min = 1; - 
m_bounds.X.Max = width; - m_bounds.Y.Max = height; - m_orig_bounds = m_bounds; + this->Bounds.X.Min = 1; + this->Bounds.Y.Min = 1; + this->Bounds.X.Max = width; + this->Bounds.Y.Max = height; + this->OrigBounds = this->Bounds; const int size = width * height; - m_payloads.resize(size * m_payload_bytes); - m_depths.resize(size); + this->Payloads.resize(size * this->PayloadBytes); + this->Depths.resize(size); - std::copy(payload_buffer, payload_buffer + size * m_payload_bytes, &m_payloads[0]); + std::copy(payload_buffer, payload_buffer + size * this->PayloadBytes, &this->Payloads[0]); - std::copy(depth_buffer, depth_buffer + size, &m_depths[0]); + std::copy(depth_buffer, depth_buffer + size, &this->Depths[0]); } // @@ -91,30 +91,30 @@ struct VTKM_RENDERING_EXPORT PayloadImage // void SubsetFrom(const PayloadImage& image, const vtkm::Bounds& sub_region) { - m_orig_bounds = image.m_orig_bounds; - m_bounds = sub_region; - m_orig_rank = image.m_orig_rank; - m_payload_bytes = image.m_payload_bytes; + this->OrigBounds = image.OrigBounds; + this->Bounds = sub_region; + this->OrigRank = image.OrigRank; + this->PayloadBytes = image.PayloadBytes; - assert(sub_region.X.Min >= image.m_bounds.X.Min); - assert(sub_region.Y.Min >= image.m_bounds.Y.Min); - assert(sub_region.X.Max <= image.m_bounds.X.Max); - assert(sub_region.Y.Max <= image.m_bounds.Y.Max); + assert(sub_region.X.Min >= image.Bounds.X.Min); + assert(sub_region.Y.Min >= image.Bounds.Y.Min); + assert(sub_region.X.Max <= image.Bounds.X.Max); + assert(sub_region.Y.Max <= image.Bounds.Y.Max); - const int s_dx = m_bounds.X.Max - m_bounds.X.Min + 1; - const int s_dy = m_bounds.Y.Max - m_bounds.Y.Min + 1; + const int s_dx = this->Bounds.X.Max - this->Bounds.X.Min + 1; + const int s_dy = this->Bounds.Y.Max - this->Bounds.Y.Min + 1; - const int dx = image.m_bounds.X.Max - image.m_bounds.X.Min + 1; - //const int dy = image.m_bounds.Y.Max - image.m_bounds.Y.Min + 1; + const int dx = image.Bounds.X.Max - image.Bounds.X.Min + 1; + //const int dy = image.Bounds.Y.Max - image.Bounds.Y.Min + 1; - const int start_x = m_bounds.X.Min - image.m_bounds.X.Min; - const int start_y = m_bounds.Y.Min - image.m_bounds.Y.Min; + const int start_x = this->Bounds.X.Min - image.Bounds.X.Min; + const int start_y = this->Bounds.Y.Min - image.Bounds.Y.Min; const int end_y = start_y + s_dy; - size_t buffer_size = s_dx * s_dy * m_payload_bytes; + size_t buffer_size = s_dx * s_dy * this->PayloadBytes; - m_payloads.resize(buffer_size); - m_depths.resize(s_dx * s_dy); + this->Payloads.resize(buffer_size); + this->Depths.resize(s_dx * s_dy); #ifdef VTKH_OPENMP_ENABLED @@ -125,10 +125,10 @@ struct VTKM_RENDERING_EXPORT PayloadImage const int copy_to = (y - start_y) * s_dx; const int copy_from = y * dx + start_x; - std::copy(&image.m_payloads[copy_from * m_payload_bytes], - &image.m_payloads[copy_from * m_payload_bytes] + s_dx * m_payload_bytes, - &m_payloads[copy_to * m_payload_bytes]); - std::copy(&image.m_depths[copy_from], &image.m_depths[copy_from] + s_dx, &m_depths[copy_to]); + std::copy(&image.Payloads[copy_from * this->PayloadBytes], + &image.Payloads[copy_from * this->PayloadBytes] + s_dx * this->PayloadBytes, + &this->Payloads[copy_to * this->PayloadBytes]); + std::copy(&image.Depths[copy_from], &image.Depths[copy_from] + s_dx, &this->Depths[copy_to]); } } @@ -137,19 +137,19 @@ struct VTKM_RENDERING_EXPORT PayloadImage // void SubsetTo(PayloadImage& image) const { - assert(m_bounds.X.Min >= image.m_bounds.X.Min); - assert(m_bounds.Y.Min >= image.m_bounds.Y.Min); - 
assert(m_bounds.X.Max <= image.m_bounds.X.Max); - assert(m_bounds.Y.Max <= image.m_bounds.Y.Max); + assert(this->Bounds.X.Min >= image.Bounds.X.Min); + assert(this->Bounds.Y.Min >= image.Bounds.Y.Min); + assert(this->Bounds.X.Max <= image.Bounds.X.Max); + assert(this->Bounds.Y.Max <= image.Bounds.Y.Max); - const int s_dx = m_bounds.X.Max - m_bounds.X.Min + 1; - const int s_dy = m_bounds.Y.Max - m_bounds.Y.Min + 1; + const int s_dx = this->Bounds.X.Max - this->Bounds.X.Min + 1; + const int s_dy = this->Bounds.Y.Max - this->Bounds.Y.Min + 1; - const int dx = image.m_bounds.X.Max - image.m_bounds.X.Min + 1; - //const int dy = image.m_bounds.Y.Max - image.m_bounds.Y.Min + 1; + const int dx = image.Bounds.X.Max - image.Bounds.X.Min + 1; + //const int dy = image.Bounds.Y.Max - image.Bounds.Y.Min + 1; - const int start_x = m_bounds.X.Min - image.m_bounds.X.Min; - const int start_y = m_bounds.Y.Min - image.m_bounds.Y.Min; + const int start_x = this->Bounds.X.Min - image.Bounds.X.Min; + const int start_y = this->Bounds.Y.Min - image.Bounds.Y.Min; #ifdef VTKH_OPENMP_ENABLED #pragma omp parallel for @@ -159,44 +159,44 @@ struct VTKM_RENDERING_EXPORT PayloadImage const int copy_to = (y + start_y) * dx + start_x; const int copy_from = y * s_dx; - std::copy(&m_payloads[copy_from * m_payload_bytes], - &m_payloads[copy_from * m_payload_bytes] + s_dx * m_payload_bytes, - &image.m_payloads[copy_to * m_payload_bytes]); + std::copy(&this->Payloads[copy_from * this->PayloadBytes], + &this->Payloads[copy_from * this->PayloadBytes] + s_dx * this->PayloadBytes, + &image.Payloads[copy_to * this->PayloadBytes]); - std::copy(&m_depths[copy_from], &m_depths[copy_from] + s_dx, &image.m_depths[copy_to]); + std::copy(&this->Depths[copy_from], &this->Depths[copy_from] + s_dx, &image.Depths[copy_to]); } } void Swap(PayloadImage& other) { - vtkm::Bounds orig = m_orig_bounds; - vtkm::Bounds bounds = m_bounds; + vtkm::Bounds orig = this->OrigBounds; + vtkm::Bounds bounds = this->Bounds; - m_orig_bounds = other.m_orig_bounds; - m_bounds = other.m_bounds; + this->OrigBounds = other.OrigBounds; + this->Bounds = other.Bounds; - other.m_orig_bounds = orig; - other.m_bounds = bounds; + other.OrigBounds = orig; + other.Bounds = bounds; - m_payloads.swap(other.m_payloads); - m_depths.swap(other.m_depths); + this->Payloads.swap(other.Payloads); + this->Depths.swap(other.Depths); } void Clear() { vtkm::Bounds empty; - m_orig_bounds = empty; - m_bounds = empty; - m_payloads.clear(); - m_depths.clear(); + this->OrigBounds = empty; + this->Bounds = empty; + this->Payloads.clear(); + this->Depths.clear(); } std::string ToString() const { std::stringstream ss; - ss << "Total size pixels " << (int)m_depths.size(); - ss << " tile dims: {" << m_bounds.X.Min << "," << m_bounds.Y.Min << "} - "; - ss << "{" << m_bounds.X.Max << "," << m_bounds.Y.Max << "}\n"; + ss << "Total size pixels " << (int)this->Depths.size(); + ss << " tile dims: {" << this->Bounds.X.Min << "," << this->Bounds.Y.Min << "} - "; + ss << "{" << this->Bounds.X.Max << "," << this->Bounds.Y.Max << "}\n"; ; return ss.str(); } diff --git a/vtkm/rendering/compositing/PayloadImageCompositor.h b/vtkm/rendering/compositing/PayloadImageCompositor.h index 8ea3c4f4a..2bfb4dedd 100644 --- a/vtkm/rendering/compositing/PayloadImageCompositor.h +++ b/vtkm/rendering/compositing/PayloadImageCompositor.h @@ -32,36 +32,36 @@ public: void ZBufferComposite(vtkm::rendering::compositing::PayloadImage& front, const vtkm::rendering::compositing::PayloadImage& image) { - if (front.m_payload_bytes != 
image.m_payload_bytes) + if (front.PayloadBytes != image.PayloadBytes) { std::cout << "very bad\n"; } - assert(front.m_depths.size() == front.m_payloads.size() / front.m_payload_bytes); - assert(front.m_bounds.X.Min == image.m_bounds.X.Min); - assert(front.m_bounds.Y.Min == image.m_bounds.Y.Min); - assert(front.m_bounds.X.Max == image.m_bounds.X.Max); - assert(front.m_bounds.Y.Max == image.m_bounds.Y.Max); + assert(front.Depths.size() == front.Payloads.size() / front.PayloadBytes); + assert(front.Bounds.X.Min == image.Bounds.X.Min); + assert(front.Bounds.Y.Min == image.Bounds.Y.Min); + assert(front.Bounds.X.Max == image.Bounds.X.Max); + assert(front.Bounds.Y.Max == image.Bounds.Y.Max); - const int size = static_cast(front.m_depths.size()); - const bool nan_check = image.m_default_value != image.m_default_value; + const int size = static_cast(front.Depths.size()); + const bool nan_check = image.DefaultValue != image.DefaultValue; #ifdef VTKH_OPENMP_ENABLED #pragma omp parallel for #endif for (int i = 0; i < size; ++i) { - const float depth = image.m_depths[i]; - const float fdepth = front.m_depths[i]; + const float depth = image.Depths[i]; + const float fdepth = front.Depths[i]; // this should handle NaNs correctly const bool take_back = fmin(depth, fdepth) == depth; if (take_back) { const int offset = i * 4; - front.m_depths[i] = depth; - const size_t p_offset = i * front.m_payload_bytes; - std::copy(&image.m_payloads[p_offset], - &image.m_payloads[p_offset] + front.m_payload_bytes, - &front.m_payloads[p_offset]); + front.Depths[i] = depth; + const size_t p_offset = i * front.PayloadBytes; + std::copy(&image.Payloads[p_offset], + &image.Payloads[p_offset] + front.PayloadBytes, + &front.Payloads[p_offset]); } } } diff --git a/vtkm/rendering/compositing/RadixKCompositor.cxx b/vtkm/rendering/compositing/RadixKCompositor.cxx index 1bbdc2ace..af1a1f204 100644 --- a/vtkm/rendering/compositing/RadixKCompositor.cxx +++ b/vtkm/rendering/compositing/RadixKCompositor.cxx @@ -84,7 +84,7 @@ void reduce_images(void* b, const int current_dim = partners.dim(round); //create balanced set of ranges for current dim - vtkmdiy::DiscreteBounds image_bounds = vtkh::VTKMBoundsToDIY(image.m_bounds); + vtkmdiy::DiscreteBounds image_bounds = vtkh::VTKMBoundsToDIY(image.Bounds); int range_length = image_bounds.max[current_dim] - image_bounds.min[current_dim]; int base_step = range_length / group_size; int rem = range_length % group_size; @@ -102,7 +102,7 @@ void reduce_images(void* b, assert(count == range_length); std::vector subset_bounds(group_size, - vtkh::VTKMBoundsToDIY(image.m_bounds)); + vtkh::VTKMBoundsToDIY(image.Bounds)); int min_pixel = image_bounds.min[current_dim]; for (int i = 0; i < group_size; ++i) { @@ -150,7 +150,7 @@ RadixKCompositor::~RadixKCompositor() {} template void RadixKCompositor::CompositeImpl(vtkmdiy::mpi::communicator& diy_comm, ImageType& image) { - vtkmdiy::DiscreteBounds global_bounds = vtkh::VTKMBoundsToDIY(image.m_orig_bounds); + vtkmdiy::DiscreteBounds global_bounds = vtkh::VTKMBoundsToDIY(image.OrigBounds); // tells diy to use one thread const int num_threads = 1; diff --git a/vtkm/rendering/compositing/vtkm_diy_image_block.h b/vtkm/rendering/compositing/vtkm_diy_image_block.h index 2e9120946..ed074a4a2 100644 --- a/vtkm/rendering/compositing/vtkm_diy_image_block.h +++ b/vtkm/rendering/compositing/vtkm_diy_image_block.h @@ -99,46 +99,46 @@ struct Serialization { static void save(BinaryBuffer& bb, const vtkm::rendering::compositing::PayloadImage& image) { - vtkmdiy::save(bb, 
image.m_orig_bounds.X.Min); - vtkmdiy::save(bb, image.m_orig_bounds.Y.Min); - vtkmdiy::save(bb, image.m_orig_bounds.Z.Min); - vtkmdiy::save(bb, image.m_orig_bounds.X.Max); - vtkmdiy::save(bb, image.m_orig_bounds.Y.Max); - vtkmdiy::save(bb, image.m_orig_bounds.Z.Max); + vtkmdiy::save(bb, image.OrigBounds.X.Min); + vtkmdiy::save(bb, image.OrigBounds.Y.Min); + vtkmdiy::save(bb, image.OrigBounds.Z.Min); + vtkmdiy::save(bb, image.OrigBounds.X.Max); + vtkmdiy::save(bb, image.OrigBounds.Y.Max); + vtkmdiy::save(bb, image.OrigBounds.Z.Max); - vtkmdiy::save(bb, image.m_bounds.X.Min); - vtkmdiy::save(bb, image.m_bounds.Y.Min); - vtkmdiy::save(bb, image.m_bounds.Z.Min); - vtkmdiy::save(bb, image.m_bounds.X.Max); - vtkmdiy::save(bb, image.m_bounds.Y.Max); - vtkmdiy::save(bb, image.m_bounds.Z.Max); + vtkmdiy::save(bb, image.Bounds.X.Min); + vtkmdiy::save(bb, image.Bounds.Y.Min); + vtkmdiy::save(bb, image.Bounds.Z.Min); + vtkmdiy::save(bb, image.Bounds.X.Max); + vtkmdiy::save(bb, image.Bounds.Y.Max); + vtkmdiy::save(bb, image.Bounds.Z.Max); - vtkmdiy::save(bb, image.m_payloads); - vtkmdiy::save(bb, image.m_payload_bytes); - vtkmdiy::save(bb, image.m_depths); - vtkmdiy::save(bb, image.m_orig_rank); + vtkmdiy::save(bb, image.Payloads); + vtkmdiy::save(bb, image.PayloadBytes); + vtkmdiy::save(bb, image.Depths); + vtkmdiy::save(bb, image.OrigRank); } static void load(BinaryBuffer& bb, vtkm::rendering::compositing::PayloadImage& image) { - vtkmdiy::load(bb, image.m_orig_bounds.X.Min); - vtkmdiy::load(bb, image.m_orig_bounds.Y.Min); - vtkmdiy::load(bb, image.m_orig_bounds.Z.Min); - vtkmdiy::load(bb, image.m_orig_bounds.X.Max); - vtkmdiy::load(bb, image.m_orig_bounds.Y.Max); - vtkmdiy::load(bb, image.m_orig_bounds.Z.Max); + vtkmdiy::load(bb, image.OrigBounds.X.Min); + vtkmdiy::load(bb, image.OrigBounds.Y.Min); + vtkmdiy::load(bb, image.OrigBounds.Z.Min); + vtkmdiy::load(bb, image.OrigBounds.X.Max); + vtkmdiy::load(bb, image.OrigBounds.Y.Max); + vtkmdiy::load(bb, image.OrigBounds.Z.Max); - vtkmdiy::load(bb, image.m_bounds.X.Min); - vtkmdiy::load(bb, image.m_bounds.Y.Min); - vtkmdiy::load(bb, image.m_bounds.Z.Min); - vtkmdiy::load(bb, image.m_bounds.X.Max); - vtkmdiy::load(bb, image.m_bounds.Y.Max); - vtkmdiy::load(bb, image.m_bounds.Z.Max); + vtkmdiy::load(bb, image.Bounds.X.Min); + vtkmdiy::load(bb, image.Bounds.Y.Min); + vtkmdiy::load(bb, image.Bounds.Z.Min); + vtkmdiy::load(bb, image.Bounds.X.Max); + vtkmdiy::load(bb, image.Bounds.Y.Max); + vtkmdiy::load(bb, image.Bounds.Z.Max); - vtkmdiy::load(bb, image.m_payloads); - vtkmdiy::load(bb, image.m_payload_bytes); - vtkmdiy::load(bb, image.m_depths); - vtkmdiy::load(bb, image.m_orig_rank); + vtkmdiy::load(bb, image.Payloads); + vtkmdiy::load(bb, image.PayloadBytes); + vtkmdiy::load(bb, image.Depths); + vtkmdiy::load(bb, image.OrigRank); } }; @@ -147,46 +147,46 @@ struct Serialization { static void save(BinaryBuffer& bb, const vtkm::rendering::compositing::Image& image) { - vtkmdiy::save(bb, image.m_orig_bounds.X.Min); - vtkmdiy::save(bb, image.m_orig_bounds.Y.Min); - vtkmdiy::save(bb, image.m_orig_bounds.Z.Min); - vtkmdiy::save(bb, image.m_orig_bounds.X.Max); - vtkmdiy::save(bb, image.m_orig_bounds.Y.Max); - vtkmdiy::save(bb, image.m_orig_bounds.Z.Max); + vtkmdiy::save(bb, image.OrigBounds.X.Min); + vtkmdiy::save(bb, image.OrigBounds.Y.Min); + vtkmdiy::save(bb, image.OrigBounds.Z.Min); + vtkmdiy::save(bb, image.OrigBounds.X.Max); + vtkmdiy::save(bb, image.OrigBounds.Y.Max); + vtkmdiy::save(bb, image.OrigBounds.Z.Max); - vtkmdiy::save(bb, image.m_bounds.X.Min); - 
vtkmdiy::save(bb, image.m_bounds.Y.Min); - vtkmdiy::save(bb, image.m_bounds.Z.Min); - vtkmdiy::save(bb, image.m_bounds.X.Max); - vtkmdiy::save(bb, image.m_bounds.Y.Max); - vtkmdiy::save(bb, image.m_bounds.Z.Max); + vtkmdiy::save(bb, image.Bounds.X.Min); + vtkmdiy::save(bb, image.Bounds.Y.Min); + vtkmdiy::save(bb, image.Bounds.Z.Min); + vtkmdiy::save(bb, image.Bounds.X.Max); + vtkmdiy::save(bb, image.Bounds.Y.Max); + vtkmdiy::save(bb, image.Bounds.Z.Max); - vtkmdiy::save(bb, image.m_pixels); - vtkmdiy::save(bb, image.m_depths); - vtkmdiy::save(bb, image.m_orig_rank); - vtkmdiy::save(bb, image.m_composite_order); + vtkmdiy::save(bb, image.Pixels); + vtkmdiy::save(bb, image.Depths); + vtkmdiy::save(bb, image.OrigRank); + vtkmdiy::save(bb, image.CompositeOrder); } static void load(BinaryBuffer& bb, vtkm::rendering::compositing::Image& image) { - vtkmdiy::load(bb, image.m_orig_bounds.X.Min); - vtkmdiy::load(bb, image.m_orig_bounds.Y.Min); - vtkmdiy::load(bb, image.m_orig_bounds.Z.Min); - vtkmdiy::load(bb, image.m_orig_bounds.X.Max); - vtkmdiy::load(bb, image.m_orig_bounds.Y.Max); - vtkmdiy::load(bb, image.m_orig_bounds.Z.Max); + vtkmdiy::load(bb, image.OrigBounds.X.Min); + vtkmdiy::load(bb, image.OrigBounds.Y.Min); + vtkmdiy::load(bb, image.OrigBounds.Z.Min); + vtkmdiy::load(bb, image.OrigBounds.X.Max); + vtkmdiy::load(bb, image.OrigBounds.Y.Max); + vtkmdiy::load(bb, image.OrigBounds.Z.Max); - vtkmdiy::load(bb, image.m_bounds.X.Min); - vtkmdiy::load(bb, image.m_bounds.Y.Min); - vtkmdiy::load(bb, image.m_bounds.Z.Min); - vtkmdiy::load(bb, image.m_bounds.X.Max); - vtkmdiy::load(bb, image.m_bounds.Y.Max); - vtkmdiy::load(bb, image.m_bounds.Z.Max); + vtkmdiy::load(bb, image.Bounds.X.Min); + vtkmdiy::load(bb, image.Bounds.Y.Min); + vtkmdiy::load(bb, image.Bounds.Z.Min); + vtkmdiy::load(bb, image.Bounds.X.Max); + vtkmdiy::load(bb, image.Bounds.Y.Max); + vtkmdiy::load(bb, image.Bounds.Z.Max); - vtkmdiy::load(bb, image.m_pixels); - vtkmdiy::load(bb, image.m_depths); - vtkmdiy::load(bb, image.m_orig_rank); - vtkmdiy::load(bb, image.m_composite_order); + vtkmdiy::load(bb, image.Pixels); + vtkmdiy::load(bb, image.Depths); + vtkmdiy::load(bb, image.OrigRank); + vtkmdiy::load(bb, image.CompositeOrder); } }; } //namespace vtkmdiy
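
Note on the depth-test path: ImageCompositor::ZBufferComposite (with the renamed Pixels/Depths fields above) keeps, per pixel, whichever image is closer to the camera and skips depths beyond the far plane. The standalone sketch below illustrates that logic with plain std::vector buffers; the function name and the buffer layout (4 bytes per pixel, one float depth per pixel) are illustrative assumptions, not part of this patch.

    #include <cmath>
    #include <vector>

    // Illustrative sketch of per-pixel z-buffer surface compositing.
    // 'frontPixels' holds RGBA bytes (4 per pixel), 'frontDepths' one float per pixel;
    // the incoming image uses the same layout. The closer (smaller) depth wins.
    void ZBufferCompositeSketch(std::vector<unsigned char>& frontPixels,
                                std::vector<float>& frontDepths,
                                const std::vector<unsigned char>& imagePixels,
                                const std::vector<float>& imageDepths)
    {
      const int size = static_cast<int>(frontDepths.size());
      for (int i = 0; i < size; ++i)
      {
        const float depth = imageDepths[i];
        // Skip pixels behind the far plane or behind the current front pixel.
        if (depth > 1.f || frontDepths[i] < depth)
        {
          continue;
        }
        const int offset = i * 4;
        frontDepths[i] = std::abs(depth);
        for (int c = 0; c < 4; ++c)
        {
          frontPixels[offset + c] = imagePixels[offset + c];
        }
      }
    }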
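
Note on the blending path: Blend and CompositeBackground operate on premultiplied 8-bit RGBA, so each back contribution is attenuated by the front pixel's remaining transparency (255 - alpha) and added channel-wise; the real hunk additionally keeps the minimum of the two depths. A minimal sketch of that front-to-back "over" step, assuming premultiplied colors and a hypothetical function name:

    #include <vector>

    // Illustrative sketch of front-to-back blending of two premultiplied RGBA images.
    // Because colors are premultiplied by alpha, the back image is scaled by the front
    // image's remaining transparency (255 - alpha) and accumulated channel-wise.
    void BlendOverSketch(std::vector<unsigned char>& front,
                         const std::vector<unsigned char>& back)
    {
      const int numPixels = static_cast<int>(front.size() / 4);
      for (int i = 0; i < numPixels; ++i)
      {
        const int offset = i * 4;
        const unsigned int alpha = front[offset + 3];
        const unsigned int opacity = 255 - alpha; // remaining transparency of the front pixel
        for (int c = 0; c < 4; ++c)
        {
          front[offset + c] =
            static_cast<unsigned char>(front[offset + c] + opacity * back[offset + c] / 255);
        }
      }
    }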
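
Note on the sub-image copies: SubsetFrom and SubsetTo move whole pixel rows between a full image and a sub-region using the index arithmetic copy_to = (y - start_y) * s_dx and copy_from = y * dx + start_x. A small sketch of the extraction direction, with hypothetical names for the widths and offsets:

    #include <algorithm>
    #include <vector>

    // Illustrative sketch of the row-by-row copy behind Image::SubsetFrom.
    // A sub-region of width subDx and height subDy, starting at (startX, startY)
    // inside a full image of row width fullDx, is extracted with 4 bytes per pixel.
    void SubsetFromSketch(std::vector<unsigned char>& subPixels,
                          const std::vector<unsigned char>& fullPixels,
                          int fullDx, int subDx, int subDy, int startX, int startY)
    {
      subPixels.resize(static_cast<size_t>(subDx) * subDy * 4);
      for (int y = startY; y < startY + subDy; ++y)
      {
        const int copyTo = (y - startY) * subDx;  // row offset within the subset
        const int copyFrom = y * fullDx + startX; // row offset within the full image
        std::copy(&fullPixels[copyFrom * 4],
                  &fullPixels[copyFrom * 4] + subDx * 4,
                  &subPixels[copyTo * 4]);
      }
    }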