/*
 * Copyright 2011-2016 Blender Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "device/device_split_kernel.h"

#include "kernel/kernel_types.h"
#include "kernel/split/kernel_split_data_types.h"

#include "util/util_logging.h"
#include "util/util_time.h"

CCL_NAMESPACE_BEGIN

/* Smoothing factor for the exponential moving average of render time per
 * sample; see the rolling-average update at the end of path_trace(). */
static const double alpha = 0.1;
|
/* Construct a split-kernel controller bound to the given device.
 *
 * All per-stage kernel handles start out NULL and are created later by
 * load_kernels(); device-side state buffers are allocated lazily when the
 * first tile is rendered (see path_trace()).
 */
DeviceSplitKernel::DeviceSplitKernel(Device *device) : device(device)
{
	first_tile = true;
	current_max_closure = -1;
	avg_time_per_sample = 0.0;

	/* Per-stage kernels are fetched on demand in load_kernels(). */
	kernel_path_init = NULL;
	kernel_scene_intersect = NULL;
	kernel_lamp_emission = NULL;
	kernel_do_volume = NULL;
	kernel_queue_enqueue = NULL;
	kernel_indirect_background = NULL;
	kernel_shader_setup = NULL;
	kernel_shader_sort = NULL;
	kernel_shader_eval = NULL;
	kernel_holdout_emission_blurring_pathtermination_ao = NULL;
	kernel_subsurface_scatter = NULL;
	kernel_direct_lighting = NULL;
	kernel_shadow_blocked_ao = NULL;
	kernel_shadow_blocked_dl = NULL;
	kernel_enqueue_inactive = NULL;
	kernel_next_iteration_setup = NULL;
	kernel_indirect_subsurface = NULL;
	kernel_buffer_update = NULL;
}
/* Release device-side state buffers and destroy the per-stage kernels. */
DeviceSplitKernel::~DeviceSplitKernel()
{
	/* Buffers allocated on the first tile in path_trace(); freeing memory
	 * that was never allocated is expected to be a safe no-op. */
	device->mem_free(split_data);
	device->mem_free(ray_state);
	device->mem_free(use_queues_flag);
	device->mem_free(queue_index);
	device->mem_free(work_pool_wgs);

	/* Deleting a kernel that was never loaded (still NULL) is a no-op. */
	delete kernel_path_init;
	delete kernel_scene_intersect;
	delete kernel_lamp_emission;
	delete kernel_do_volume;
	delete kernel_queue_enqueue;
	delete kernel_indirect_background;
	delete kernel_shader_setup;
	delete kernel_shader_sort;
	delete kernel_shader_eval;
	delete kernel_holdout_emission_blurring_pathtermination_ao;
	delete kernel_subsurface_scatter;
	delete kernel_direct_lighting;
	delete kernel_shadow_blocked_ao;
	delete kernel_shadow_blocked_dl;
	delete kernel_enqueue_inactive;
	delete kernel_next_iteration_setup;
	delete kernel_indirect_subsurface;
	delete kernel_buffer_update;
}
bool DeviceSplitKernel::load_kernels(const DeviceRequestedFeatures& requested_features)
|
|
|
|
{
|
|
|
|
#define LOAD_KERNEL(name) \
|
|
|
|
kernel_##name = get_split_kernel_function(#name, requested_features); \
|
|
|
|
if(!kernel_##name) { \
|
|
|
|
return false; \
|
|
|
|
}
|
|
|
|
|
2017-03-01 06:05:55 +00:00
|
|
|
LOAD_KERNEL(path_init);
|
2017-02-22 13:10:02 +00:00
|
|
|
LOAD_KERNEL(scene_intersect);
|
|
|
|
LOAD_KERNEL(lamp_emission);
|
2017-03-08 14:42:26 +00:00
|
|
|
LOAD_KERNEL(do_volume);
|
2017-02-22 13:10:02 +00:00
|
|
|
LOAD_KERNEL(queue_enqueue);
|
2017-03-08 14:42:26 +00:00
|
|
|
LOAD_KERNEL(indirect_background);
|
2017-05-03 13:30:45 +00:00
|
|
|
LOAD_KERNEL(shader_setup);
|
|
|
|
LOAD_KERNEL(shader_sort);
|
2017-02-22 13:10:02 +00:00
|
|
|
LOAD_KERNEL(shader_eval);
|
|
|
|
LOAD_KERNEL(holdout_emission_blurring_pathtermination_ao);
|
2017-03-08 14:42:26 +00:00
|
|
|
LOAD_KERNEL(subsurface_scatter);
|
2017-02-22 13:10:02 +00:00
|
|
|
LOAD_KERNEL(direct_lighting);
|
2017-03-08 16:39:40 +00:00
|
|
|
LOAD_KERNEL(shadow_blocked_ao);
|
|
|
|
LOAD_KERNEL(shadow_blocked_dl);
|
2017-05-30 00:40:26 +00:00
|
|
|
LOAD_KERNEL(enqueue_inactive);
|
2017-02-22 13:10:02 +00:00
|
|
|
LOAD_KERNEL(next_iteration_setup);
|
2017-03-08 14:42:26 +00:00
|
|
|
LOAD_KERNEL(indirect_subsurface);
|
|
|
|
LOAD_KERNEL(buffer_update);
|
2017-02-22 13:10:02 +00:00
|
|
|
|
|
|
|
#undef LOAD_KERNEL
|
|
|
|
|
|
|
|
current_max_closure = requested_features.max_closure;
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2017-03-11 10:23:11 +00:00
|
|
|
/* Compute how many state elements fit in a state buffer of at most
 * max_buffer_size bytes.
 *
 * The per-element size is estimated by sizing a buffer for 1024 elements
 * and dividing, which amortizes any fixed per-buffer overhead across a
 * representative element count.
 */
size_t DeviceSplitKernel::max_elements_for_max_buffer_size(device_memory& kg, device_memory& data, uint64_t max_buffer_size)
{
	uint64_t size_per_element = state_buffer_size(kg, data, 1024) / 1024;
	/* Guard the division below: a state size under 1024 bytes total would
	 * truncate to zero and crash with a division by zero. */
	assert(size_per_element != 0);
	VLOG(1) << "Split state element size: "
	        << string_human_readable_number(size_per_element) << " bytes. ("
	        << string_human_readable_size(size_per_element) << ").";
	return max_buffer_size / size_per_element;
}
/* Render one tile with the split kernel.
 *
 * Samples are rendered in batches sized from a rolling average of the
 * observed time per sample, so that progress updates arrive at a roughly
 * constant rate. Returns false on device error, true otherwise (including
 * when cancelled).
 */
bool DeviceSplitKernel::path_trace(DeviceTask *task,
                                   RenderTile& tile,
                                   device_memory& kgbuffer,
                                   device_memory& kernel_data)
{
	if(device->have_error()) {
		return false;
	}

	/* Get local size */
	size_t local_size[2];
	{
		int2 lsize = split_kernel_local_size();
		local_size[0] = lsize[0];
		local_size[1] = lsize[1];
	}

	/* Number of elements in the global state buffer */
	int num_global_elements = global_size[0] * global_size[1];

	/* Allocate all required global memory once, on the first tile. */
	if(first_tile) {
		first_tile = false;

		/* Set global size, rounded up so that the work size is a multiple
		 * of the local work size in each dimension. */
		{
			int2 gsize = split_kernel_global_size(kgbuffer, kernel_data, task);

			global_size[0] = round_up(gsize[0], local_size[0]);
			global_size[1] = round_up(gsize[1], local_size[1]);
		}

		num_global_elements = global_size[0] * global_size[1];
		assert(num_global_elements % WORK_POOL_SIZE == 0);

		/* Maximum work groups possible w.r.t. the current tile size. */
		unsigned int work_pool_size = (device->info.type == DEVICE_CPU) ? WORK_POOL_SIZE_CPU : WORK_POOL_SIZE_GPU;
		unsigned int max_work_groups = num_global_elements / work_pool_size + 1;

		/* Allocate the per-work-group pool counters. */
		work_pool_wgs.resize(max_work_groups);
		device->mem_alloc("work_pool_wgs", work_pool_wgs, MEM_READ_WRITE);

		queue_index.resize(NUM_QUEUES);
		device->mem_alloc("queue_index", queue_index, MEM_READ_WRITE);

		use_queues_flag.resize(1);
		device->mem_alloc("use_queues_flag", use_queues_flag, MEM_READ_WRITE);

		ray_state.resize(num_global_elements);
		device->mem_alloc("ray_state", ray_state, MEM_READ_WRITE);

		split_data.resize(state_buffer_size(kgbuffer, kernel_data, num_global_elements));
		device->mem_alloc("split_data", split_data, MEM_READ_WRITE);
	}

	/* Enqueue one path-iteration stage, aborting on any device error. */
#define ENQUEUE_SPLIT_KERNEL(name, global_size, local_size) \
	if(device->have_error()) { \
		return false; \
	} \
	if(!kernel_##name->enqueue(KernelDimensions(global_size, local_size), kgbuffer, kernel_data)) { \
		return false; \
	}

	tile.sample = tile.start_sample;

	/* For exponential increase between tile updates. */
	int time_multiplier = 1;

	while(tile.sample < tile.start_sample + tile.num_samples) {
		/* To keep track of how long it takes to run a number of samples. */
		double start_time = time_dt();

		/* Initial guess to start the rolling average. */
		const int initial_num_samples = 1;
		/* Approximate number of samples per second. */
		int samples_per_second = (avg_time_per_sample > 0.0) ?
			int(double(time_multiplier) / avg_time_per_sample) + 1 : initial_num_samples;

		RenderTile subtile = tile;
		subtile.start_sample = tile.sample;
		subtile.num_samples = min(samples_per_second, tile.start_sample + tile.num_samples - tile.sample);

		if(device->have_error()) {
			return false;
		}

		/* Reset state memory here as the global size for the data_init
		 * kernel might not be large enough to do it in kernel. */
		device->mem_zero(work_pool_wgs);
		device->mem_zero(split_data);
		device->mem_zero(ray_state);

		if(!enqueue_split_kernel_data_init(KernelDimensions(global_size, local_size),
		                                   subtile,
		                                   num_global_elements,
		                                   kgbuffer,
		                                   kernel_data,
		                                   split_data,
		                                   ray_state,
		                                   queue_index,
		                                   use_queues_flag,
		                                   work_pool_wgs))
		{
			return false;
		}

		ENQUEUE_SPLIT_KERNEL(path_init, global_size, local_size);

		bool rays_active = true;
		double cancel_time = DBL_MAX;

		while(rays_active) {
			/* Do the path iteration on the host by enqueueing the
			 * path-iteration kernels in order. */
			for(int path_iter = 0; path_iter < 16; path_iter++) {
				ENQUEUE_SPLIT_KERNEL(scene_intersect, global_size, local_size);
				ENQUEUE_SPLIT_KERNEL(lamp_emission, global_size, local_size);
				ENQUEUE_SPLIT_KERNEL(do_volume, global_size, local_size);
				ENQUEUE_SPLIT_KERNEL(queue_enqueue, global_size, local_size);
				ENQUEUE_SPLIT_KERNEL(indirect_background, global_size, local_size);
				ENQUEUE_SPLIT_KERNEL(shader_setup, global_size, local_size);
				ENQUEUE_SPLIT_KERNEL(shader_sort, global_size, local_size);
				ENQUEUE_SPLIT_KERNEL(shader_eval, global_size, local_size);
				ENQUEUE_SPLIT_KERNEL(holdout_emission_blurring_pathtermination_ao, global_size, local_size);
				ENQUEUE_SPLIT_KERNEL(subsurface_scatter, global_size, local_size);
				ENQUEUE_SPLIT_KERNEL(queue_enqueue, global_size, local_size);
				ENQUEUE_SPLIT_KERNEL(direct_lighting, global_size, local_size);
				ENQUEUE_SPLIT_KERNEL(shadow_blocked_ao, global_size, local_size);
				ENQUEUE_SPLIT_KERNEL(shadow_blocked_dl, global_size, local_size);
				ENQUEUE_SPLIT_KERNEL(enqueue_inactive, global_size, local_size);
				ENQUEUE_SPLIT_KERNEL(next_iteration_setup, global_size, local_size);
				ENQUEUE_SPLIT_KERNEL(indirect_subsurface, global_size, local_size);
				ENQUEUE_SPLIT_KERNEL(queue_enqueue, global_size, local_size);
				ENQUEUE_SPLIT_KERNEL(buffer_update, global_size, local_size);

				if(task->get_cancel() && cancel_time == DBL_MAX) {
					/* Wait up to twice as many seconds for current samples
					 * to finish, to avoid artifacts in the render result
					 * from ending too soon. */
					cancel_time = time_dt() + 2.0 * time_multiplier;
				}

				if(time_dt() > cancel_time) {
					return true;
				}
			}

			/* Decide whether to exit the host-side path iteration by
			 * inspecting the ray states copied back from the device. */
			device->mem_copy_from(ray_state, 0, global_size[0] * global_size[1] * sizeof(char), 1, 1);

			rays_active = false;

			for(int ray_index = 0; ray_index < global_size[0] * global_size[1]; ++ray_index) {
				if(!IS_STATE(ray_state.get_data(), ray_index, RAY_INACTIVE)) {
					if(IS_STATE(ray_state.get_data(), ray_index, RAY_INVALID)) {
						/* Something went wrong, abort to avoid looping endlessly. */
						device->set_error("Split kernel error: invalid ray state");
						return false;
					}

					/* Not all rays are RAY_INACTIVE. */
					rays_active = true;
					break;
				}
			}

			if(time_dt() > cancel_time) {
				return true;
			}
		}

		double time_per_sample = ((time_dt()-start_time) / subtile.num_samples);

		if(avg_time_per_sample == 0.0) {
			/* Start the rolling average. */
			avg_time_per_sample = time_per_sample;
		}
		else {
			avg_time_per_sample = alpha*time_per_sample + (1.0-alpha)*avg_time_per_sample;
		}

#undef ENQUEUE_SPLIT_KERNEL

		tile.sample += subtile.num_samples;
		task->update_progress(&tile, tile.w*tile.h*subtile.num_samples);

		/* Exponentially grow the batch size, capped at 10x. */
		time_multiplier = min(time_multiplier << 1, 10);

		if(task->get_cancel()) {
			return true;
		}
	}

	return true;
}
CCL_NAMESPACE_END
|
|
|
|
|
|
|
|
|