Merge branch 'master' into blender2.8

Conflicts:
	intern/cycles/device/device.cpp
	source/blender/blenkernel/intern/library.c
	source/blender/blenkernel/intern/material.c
	source/blender/editors/object/object_add.c
	source/blender/editors/object/object_relations.c
	source/blender/editors/space_outliner/outliner_draw.c
	source/blender/editors/space_outliner/outliner_edit.c
	source/blender/editors/space_view3d/drawobject.c
	source/blender/editors/util/ed_util.c
	source/blender/windowmanager/intern/wm_files_link.c
Bastien Montagne 2017-11-06 18:02:46 +01:00
commit 91af8f2ae2
116 changed files with 762 additions and 620 deletions

@ -248,12 +248,12 @@ void Device::draw_pixels(
glBindTexture(GL_TEXTURE_2D, texid);
if(rgba.data_type == TYPE_HALF) {
GLhalf *data_pointer = (GLhalf*)rgba.data_pointer;
GLhalf *data_pointer = (GLhalf*)rgba.host_pointer;
data_pointer += 4 * y * w;
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA16F_ARB, w, h, 0, GL_RGBA, GL_HALF_FLOAT, data_pointer);
}
else {
uint8_t *data_pointer = (uint8_t*)rgba.data_pointer;
uint8_t *data_pointer = (uint8_t*)rgba.host_pointer;
data_pointer += 4 * y * w;
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, w, h, 0, GL_RGBA, GL_UNSIGNED_BYTE, data_pointer);
}

@ -297,10 +297,14 @@ public:
<< string_human_readable_size(mem.memory_size()) << ")";
}
mem.device_pointer = mem.data_pointer;
if(!mem.device_pointer) {
mem.device_pointer = (device_ptr)malloc(mem.memory_size());
if(mem.type == MEM_DEVICE_ONLY) {
assert(!mem.host_pointer);
size_t alignment = mem_address_alignment();
void *data = util_aligned_malloc(mem.memory_size(), alignment);
mem.device_pointer = (device_ptr)data;
}
else {
mem.device_pointer = (device_ptr)mem.host_pointer;
}
mem.device_size = mem.memory_size();
@ -350,8 +354,8 @@ public:
tex_free(mem);
}
else if(mem.device_pointer) {
if(!mem.data_pointer) {
free((void*)mem.device_pointer);
if(mem.type == MEM_DEVICE_ONLY) {
util_aligned_free((void*)mem.device_pointer);
}
mem.device_pointer = 0;
stats.mem_free(mem.device_size);
@ -379,7 +383,7 @@ public:
/* Data texture. */
kernel_tex_copy(&kernel_globals,
mem.name,
mem.data_pointer,
mem.host_pointer,
mem.data_size);
}
else {
@ -400,7 +404,7 @@ public:
}
TextureInfo& info = texture_info[flat_slot];
info.data = (uint64_t)mem.data_pointer;
info.data = (uint64_t)mem.host_pointer;
info.cl_buffer = 0;
info.interpolation = mem.interpolation;
info.extension = mem.extension;
@ -411,7 +415,7 @@ public:
need_texture_info = true;
}
mem.device_pointer = mem.data_pointer;
mem.device_pointer = (device_ptr)mem.host_pointer;
mem.device_size = mem.memory_size();
stats.mem_alloc(mem.device_size);
}
@ -457,7 +461,7 @@ public:
bool denoising_set_tiles(device_ptr *buffers, DenoisingTask *task)
{
TilesInfo *tiles = (TilesInfo*) task->tiles_mem.data_pointer;
TilesInfo *tiles = (TilesInfo*) task->tiles_mem.host_pointer;
for(int i = 0; i < 9; i++) {
tiles->buffers[i] = buffers[i];
}

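The device_cpu.cpp hunks above change how the CPU device backs its allocations: memory flagged MEM_DEVICE_ONLY gets its own aligned allocation, while everything else simply reuses the host pointer as the device pointer. Below is a minimal standalone sketch of that rule; the types and names are illustrative, not the Cycles classes, and posix_memalign/free stand in for util_aligned_malloc/util_aligned_free.

#include <cassert>
#include <cstdint>
#include <cstdlib>

typedef uint64_t device_ptr;

enum MemoryType { MEM_READ_ONLY, MEM_READ_WRITE, MEM_DEVICE_ONLY, MEM_TEXTURE, MEM_PIXELS };

struct MemSketch {
	MemoryType type;
	void *host_pointer;
	device_ptr device_pointer;
	size_t size;
};

static void cpu_mem_alloc_sketch(MemSketch& mem, size_t alignment)
{
	if(mem.type == MEM_DEVICE_ONLY) {
		/* No host side copy exists, so allocate dedicated aligned storage.
		 * Alignment must be a power of two and a multiple of sizeof(void*). */
		assert(!mem.host_pointer);
		void *data = NULL;
		posix_memalign(&data, alignment, mem.size);  /* stand-in for util_aligned_malloc */
		mem.device_pointer = (device_ptr)data;
	}
	else {
		/* On the CPU device, host and device share the same memory. */
		mem.device_pointer = (device_ptr)mem.host_pointer;
	}
}

static void cpu_mem_free_sketch(MemSketch& mem)
{
	if(mem.type == MEM_DEVICE_ONLY && mem.device_pointer) {
		free((void*)mem.device_pointer);  /* stand-in for util_aligned_free */
	}
	mem.device_pointer = 0;
}
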
@ -128,20 +128,26 @@ public:
CUdevice cuDevice;
CUcontext cuContext;
CUmodule cuModule, cuFilterModule;
map<device_ptr, bool> tex_interp_map;
map<device_ptr, CUtexObject> tex_bindless_map;
int cuDevId;
int cuDevArchitecture;
bool first_error;
CUDASplitKernel *split_kernel;
struct CUDAMem {
CUDAMem()
: texobject(0), array(0) {}
CUtexObject texobject;
CUarray array;
};
map<device_memory*, CUDAMem> cuda_mem_map;
struct PixelMem {
GLuint cuPBO;
CUgraphicsResource cuPBOresource;
GLuint cuTexId;
int w, h;
};
map<device_ptr, PixelMem> pixel_mem_map;
/* Bindless Textures */
@ -234,24 +240,29 @@ public:
need_texture_info = false;
/* intialize */
/* Initialize CUDA. */
if(cuda_error(cuInit(0)))
return;
/* setup device and context */
/* Setup device and context. */
if(cuda_error(cuDeviceGet(&cuDevice, cuDevId)))
return;
/* CU_CTX_LMEM_RESIZE_TO_MAX for reserving local memory ahead of render,
* so we can predict which memory to map to host. */
unsigned int ctx_flags = CU_CTX_LMEM_RESIZE_TO_MAX;
/* Create context. */
CUresult result;
if(background) {
result = cuCtxCreate(&cuContext, 0, cuDevice);
result = cuCtxCreate(&cuContext, ctx_flags, cuDevice);
}
else {
result = cuGLCtxCreate(&cuContext, 0, cuDevice);
result = cuGLCtxCreate(&cuContext, ctx_flags, cuDevice);
if(result != CUDA_SUCCESS) {
result = cuCtxCreate(&cuContext, 0, cuDevice);
result = cuCtxCreate(&cuContext, ctx_flags, cuDevice);
background = true;
}
}
@ -542,9 +553,66 @@ public:
if(cuda_error_(result, "cuModuleLoad"))
cuda_error_message(string_printf("Failed loading CUDA kernel %s.", filter_cubin.c_str()));
if(result == CUDA_SUCCESS) {
reserve_local_memory(requested_features);
}
return (result == CUDA_SUCCESS);
}
void reserve_local_memory(const DeviceRequestedFeatures& requested_features)
{
if(use_split_kernel()) {
/* Split kernel mostly uses global memory and adaptive compilation,
* difficult to predict how much is needed currently. */
return;
}
/* Together with CU_CTX_LMEM_RESIZE_TO_MAX, this reserves local memory
* needed for kernel launches, so that we can reliably figure out when
* to allocate scene data in mapped host memory. */
CUDAContextScope scope(this);
size_t total = 0, free_before = 0, free_after = 0;
cuMemGetInfo(&free_before, &total);
/* Get kernel function. */
CUfunction cuPathTrace;
if(requested_features.use_integrator_branched) {
cuda_assert(cuModuleGetFunction(&cuPathTrace, cuModule, "kernel_cuda_branched_path_trace"));
}
else {
cuda_assert(cuModuleGetFunction(&cuPathTrace, cuModule, "kernel_cuda_path_trace"));
}
cuda_assert(cuFuncSetCacheConfig(cuPathTrace, CU_FUNC_CACHE_PREFER_L1));
int min_blocks, num_threads_per_block;
cuda_assert(cuOccupancyMaxPotentialBlockSize(&min_blocks, &num_threads_per_block, cuPathTrace, NULL, 0, 0));
/* Launch kernel, using just 1 block appears sufficient to reserve
* memory for all multiprocessors. It would be good to do this in
* parallel for the multi GPU case still to make it faster. */
CUdeviceptr d_work_tiles = 0;
uint total_work_size = 0;
void *args[] = {&d_work_tiles,
&total_work_size};
cuda_assert(cuLaunchKernel(cuPathTrace,
1, 1, 1,
num_threads_per_block, 1, 1,
0, 0, args, 0));
cuda_assert(cuCtxSynchronize());
cuMemGetInfo(&free_after, &total);
VLOG(1) << "Local memory reserved "
<< string_human_readable_number(free_before - free_after) << " bytes. ("
<< string_human_readable_size(free_before - free_after) << ")";
}
void load_texture_info()
{
if(!info.has_fermi_limits && need_texture_info) {
@ -553,7 +621,7 @@ public:
}
}
void generic_alloc(device_memory& mem, size_t padding = 0)
CUDAMem *generic_alloc(device_memory& mem, size_t padding = 0)
{
CUDAContextScope scope(this);
@ -563,19 +631,28 @@ public:
<< string_human_readable_size(mem.memory_size()) << ")";
}
CUdeviceptr device_pointer;
/* Allocate memory on device. */
CUdeviceptr device_pointer = 0;
size_t size = mem.memory_size();
cuda_assert(cuMemAlloc(&device_pointer, size + padding));
mem.device_pointer = (device_ptr)device_pointer;
mem.device_size = size;
stats.mem_alloc(size);
if(!mem.device_pointer) {
return NULL;
}
/* Insert into map of allocations. */
CUDAMem *cmem = &cuda_mem_map[&mem];
return cmem;
}
void generic_copy_to(device_memory& mem)
{
if(mem.device_pointer) {
CUDAContextScope scope(this);
cuda_assert(cuMemcpyHtoD(cuda_device_ptr(mem.device_pointer), (void*)mem.data_pointer, mem.memory_size()));
cuda_assert(cuMemcpyHtoD(cuda_device_ptr(mem.device_pointer), mem.host_pointer, mem.memory_size()));
}
}
@ -586,10 +663,11 @@ public:
cuda_assert(cuMemFree(cuda_device_ptr(mem.device_pointer)));
mem.device_pointer = 0;
stats.mem_free(mem.device_size);
mem.device_pointer = 0;
mem.device_size = 0;
cuda_mem_map.erase(cuda_mem_map.find(&mem));
}
}
@ -638,11 +716,11 @@ public:
size_t size = elem*w*h;
if(mem.device_pointer) {
cuda_assert(cuMemcpyDtoH((uchar*)mem.data_pointer + offset,
cuda_assert(cuMemcpyDtoH((uchar*)mem.host_pointer + offset,
(CUdeviceptr)(mem.device_pointer + offset), size));
}
else {
memset((char*)mem.data_pointer + offset, 0, size);
memset((char*)mem.host_pointer + offset, 0, size);
}
}
}
@ -653,8 +731,8 @@ public:
mem_alloc(mem);
}
if(mem.data_pointer) {
memset((void*)mem.data_pointer, 0, mem.memory_size());
if(mem.host_pointer) {
memset(mem.host_pointer, 0, mem.memory_size());
}
if(mem.device_pointer) {
@ -752,8 +830,6 @@ public:
uint32_t ptr = (uint32_t)mem.device_pointer;
cuda_assert(cuMemcpyHtoD(cumem, (void*)&ptr, cubytes));
}
tex_interp_map[mem.device_pointer] = false;
return;
}
@ -789,7 +865,7 @@ public:
default: assert(0); return;
}
CUDAMem *cmem = NULL;
CUarray array_3d = NULL;
size_t src_pitch = mem.data_width * dsize * mem.data_elements;
size_t dst_pitch = src_pitch;
@ -816,7 +892,7 @@ public:
param.dstMemoryType = CU_MEMORYTYPE_ARRAY;
param.dstArray = array_3d;
param.srcMemoryType = CU_MEMORYTYPE_HOST;
param.srcHost = (void*)mem.data_pointer;
param.srcHost = mem.host_pointer;
param.srcPitch = src_pitch;
param.WidthInBytes = param.srcPitch;
param.Height = mem.data_height;
@ -827,6 +903,10 @@ public:
mem.device_pointer = (device_ptr)array_3d;
mem.device_size = size;
stats.mem_alloc(size);
cmem = &cuda_mem_map[&mem];
cmem->texobject = 0;
cmem->array = array_3d;
}
else if(mem.data_height > 1) {
/* 2D texture, using pitch aligned linear memory. */
@ -835,7 +915,10 @@ public:
dst_pitch = align_up(src_pitch, alignment);
size_t dst_size = dst_pitch * mem.data_height;
generic_alloc(mem, dst_size - mem.memory_size());
cmem = generic_alloc(mem, dst_size - mem.memory_size());
if(!cmem) {
return;
}
CUDA_MEMCPY2D param;
memset(&param, 0, sizeof(param));
@ -843,7 +926,7 @@ public:
param.dstDevice = mem.device_pointer;
param.dstPitch = dst_pitch;
param.srcMemoryType = CU_MEMORYTYPE_HOST;
param.srcHost = (void*)mem.data_pointer;
param.srcHost = mem.host_pointer;
param.srcPitch = src_pitch;
param.WidthInBytes = param.srcPitch;
param.Height = mem.data_height;
@ -852,8 +935,12 @@ public:
}
else {
/* 1D texture, using linear memory. */
generic_alloc(mem);
cuda_assert(cuMemcpyHtoD(mem.device_pointer, (void*)mem.data_pointer, size));
cmem = generic_alloc(mem);
if(!cmem) {
return;
}
cuda_assert(cuMemcpyHtoD(mem.device_pointer, mem.host_pointer, size));
}
if(!has_fermi_limits) {
@ -870,7 +957,7 @@ public:
CUDA_RESOURCE_DESC resDesc;
memset(&resDesc, 0, sizeof(resDesc));
if(mem.data_depth > 1) {
if(array_3d) {
resDesc.resType = CU_RESOURCE_TYPE_ARRAY;
resDesc.res.array.hArray = array_3d;
resDesc.flags = 0;
@ -900,13 +987,7 @@ public:
texDesc.filterMode = filter_mode;
texDesc.flags = CU_TRSF_NORMALIZED_COORDINATES;
CUtexObject tex = 0;
cuda_assert(cuTexObjectCreate(&tex, &resDesc, &texDesc, NULL));
/* Safety check */
if((uint)tex > UINT_MAX) {
assert(0);
}
cuda_assert(cuTexObjectCreate(&cmem->texobject, &resDesc, &texDesc, NULL));
/* Resize once */
if(flat_slot >= texture_info.size()) {
@ -917,20 +998,18 @@ public:
/* Set Mapping and tag that we need to (re-)upload to device */
TextureInfo& info = texture_info[flat_slot];
info.data = (uint64_t)tex;
info.data = (uint64_t)cmem->texobject;
info.cl_buffer = 0;
info.interpolation = mem.interpolation;
info.extension = mem.extension;
info.width = mem.data_width;
info.height = mem.data_height;
info.depth = mem.data_depth;
tex_bindless_map[mem.device_pointer] = tex;
need_texture_info = true;
}
else {
/* Fermi, fixed texture slots. */
if(mem.data_depth > 1) {
if(array_3d) {
cuda_assert(cuTexRefSetArray(texref, array_3d, CU_TRSA_OVERRIDE_FORMAT));
}
else if(mem.data_height > 1) {
@ -955,38 +1034,27 @@ public:
cuda_assert(cuTexRefSetAddressMode(texref, 2, address_mode));
}
}
/* Fermi and Kepler */
tex_interp_map[mem.device_pointer] = true;
}
void tex_free(device_memory& mem)
{
if(mem.device_pointer) {
bool interp = tex_interp_map[mem.device_pointer];
tex_interp_map.erase(tex_interp_map.find(mem.device_pointer));
if(interp) {
CUDAContextScope scope(this);
const CUDAMem& cmem = cuda_mem_map[&mem];
if(!info.has_fermi_limits) {
if(cmem.texobject) {
/* Free bindless texture. */
if(tex_bindless_map[mem.device_pointer]) {
CUtexObject tex = tex_bindless_map[mem.device_pointer];
cuTexObjectDestroy(tex);
}
cuTexObjectDestroy(cmem.texobject);
}
if(mem.data_depth > 1) {
if(cmem.array) {
/* Free array. */
cuArrayDestroy((CUarray)mem.device_pointer);
cuArrayDestroy(cmem.array);
stats.mem_free(mem.device_size);
mem.device_pointer = 0;
mem.device_size = 0;
}
else {
generic_free(mem);
}
cuda_mem_map.erase(cuda_mem_map.find(&mem));
}
else {
generic_free(mem);
@ -996,7 +1064,7 @@ public:
bool denoising_set_tiles(device_ptr *buffers, DenoisingTask *task)
{
TilesInfo *tiles = (TilesInfo*) task->tiles_mem.data_pointer;
TilesInfo *tiles = (TilesInfo*) task->tiles_mem.host_pointer;
for(int i = 0; i < 9; i++) {
tiles->buffers[i] = buffers[i];
}
@ -1393,7 +1461,7 @@ public:
/* Allocate work tile. */
work_tiles.alloc(1);
WorkTile *wtile = work_tiles.get_data();
WorkTile *wtile = work_tiles.data();
wtile->x = rtile.x;
wtile->y = rtile.y;
wtile->w = rtile.w;
@ -1654,7 +1722,7 @@ public:
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pmem.cuPBO);
uchar *pixels = (uchar*)glMapBuffer(GL_PIXEL_UNPACK_BUFFER, GL_READ_ONLY);
size_t offset = sizeof(uchar)*4*y*w;
memcpy((uchar*)mem.data_pointer + offset, pixels + offset, sizeof(uchar)*4*w*h);
memcpy((uchar*)mem.host_pointer + offset, pixels + offset, sizeof(uchar)*4*w*h);
glUnmapBuffer(GL_PIXEL_UNPACK_BUFFER);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
}

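A compressed sketch of the new CUDA-side bookkeeping shown in the hunks above: a single map of per-allocation CUDAMem records replaces the old tex_interp_map/tex_bindless_map pair, so tex_free() can decide from the record alone whether to destroy a texture object, a CUDA array, or plain linear memory. This is illustrative only, not the actual class; it assumes the CUDA driver API from cuda.h and leaves device_memory opaque.

#include <cuda.h>
#include <map>

struct device_memory;  /* opaque for this sketch */

struct CUDAMem {
	CUDAMem() : texobject(0), array(0) {}
	CUtexObject texobject;  /* set when a bindless texture object was created */
	CUarray array;          /* set for 3D textures backed by a CUDA array */
};

static std::map<device_memory*, CUDAMem> cuda_mem_map;

static void tex_free_sketch(device_memory& mem)
{
	CUDAMem& cmem = cuda_mem_map[&mem];

	if(cmem.texobject) {
		/* Free bindless texture object. */
		cuTexObjectDestroy(cmem.texobject);
	}

	if(cmem.array) {
		/* Free 3D texture array. */
		cuArrayDestroy(cmem.array);
	}
	else {
		/* Linear memory would fall through to generic_free(mem) here. */
	}

	cuda_mem_map.erase(cuda_mem_map.find(&mem));
}
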
@ -24,7 +24,6 @@ CCL_NAMESPACE_BEGIN
device_memory::device_memory(Device *device, const char *name, MemoryType type)
: data_type(device_type_traits<uchar>::data_type),
data_elements(device_type_traits<uchar>::num_elements),
data_pointer(0),
data_size(0),
device_size(0),
data_width(0),
@ -35,7 +34,8 @@ device_memory::device_memory(Device *device, const char *name, MemoryType type)
interpolation(INTERPOLATION_NONE),
extension(EXTENSION_REPEAT),
device(device),
device_pointer(0)
device_pointer(0),
host_pointer(0)
{
}
@ -43,14 +43,14 @@ device_memory::~device_memory()
{
}
device_ptr device_memory::host_alloc(size_t size)
void *device_memory::host_alloc(size_t size)
{
if(!size) {
return 0;
}
size_t alignment = device->mem_address_alignment();
device_ptr ptr = (device_ptr)util_aligned_malloc(size, alignment);
void *ptr = util_aligned_malloc(size, alignment);
if(ptr) {
util_guarded_mem_alloc(size);
@ -62,11 +62,12 @@ device_ptr device_memory::host_alloc(size_t size)
return ptr;
}
void device_memory::host_free(device_ptr ptr, size_t size)
void device_memory::host_free()
{
if(ptr) {
util_guarded_mem_free(size);
util_aligned_free((void*)ptr);
if(host_pointer) {
util_guarded_mem_free(memory_size());
util_aligned_free((void*)host_pointer);
host_pointer = 0;
}
}

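device_memory.cpp above changes host_alloc() to return a plain void* and host_free() to release whatever host_pointer currently holds, sized by memory_size(). The same pattern, consolidated into one standalone sketch: a running byte counter stands in for util_guarded_mem_alloc/free, posix_memalign for the aligned allocator, and the names are illustrative.

#include <cstddef>
#include <stdlib.h>

static size_t guarded_bytes = 0;  /* stand-in for util_guarded_mem_alloc/free */

struct HostMemSketch {
	void *host_pointer = NULL;
	size_t data_size = 0;

	size_t memory_size() const { return data_size; }

	void *host_alloc(size_t size, size_t alignment)
	{
		if(!size) {
			return NULL;
		}
		void *ptr = NULL;
		posix_memalign(&ptr, alignment, size);  /* stand-in for util_aligned_malloc */
		if(ptr) {
			guarded_bytes += size;
		}
		return ptr;
	}

	void host_free()
	{
		if(host_pointer) {
			guarded_bytes -= memory_size();
			free(host_pointer);  /* stand-in for util_aligned_free */
			host_pointer = NULL;
		}
	}
};
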
@ -34,6 +34,7 @@ class Device;
enum MemoryType {
MEM_READ_ONLY,
MEM_READ_WRITE,
MEM_DEVICE_ONLY,
MEM_TEXTURE,
MEM_PIXELS
};
@ -182,7 +183,6 @@ public:
/* Data information. */
DataType data_type;
int data_elements;
device_ptr data_pointer;
size_t data_size;
size_t device_size;
size_t data_width;
@ -193,9 +193,10 @@ public:
InterpolationType interpolation;
ExtensionType extension;
/* Device pointer. */
/* Pointers. */
Device *device;
device_ptr device_pointer;
void *host_pointer;
virtual ~device_memory();
@ -207,11 +208,11 @@ protected:
device_memory(const device_memory&);
device_memory& operator = (const device_memory&);
/* Host allocation on the device. All data_pointer memory should be
/* Host allocation on the device. All host_pointer memory should be
* allocated with these functions, for devices that support using
* the same pointer for host and device. */
device_ptr host_alloc(size_t size);
void host_free(device_ptr ptr, size_t size);
void *host_alloc(size_t size);
void host_free();
/* Device memory allocation and copying. */
void device_alloc();
@ -231,7 +232,7 @@ class device_only_memory : public device_memory
{
public:
device_only_memory(Device *device, const char *name)
: device_memory(device, name, MEM_READ_WRITE)
: device_memory(device, name, MEM_DEVICE_ONLY)
{
data_type = device_type_traits<T>::data_type;
data_elements = max(device_type_traits<T>::num_elements, 1);
@ -294,8 +295,8 @@ public:
if(new_size != data_size) {
device_free();
host_free(data_pointer, sizeof(T)*data_size);
data_pointer = host_alloc(sizeof(T)*new_size);
host_free();
host_pointer = host_alloc(sizeof(T)*new_size);
assert(device_pointer == 0);
}
@ -304,7 +305,7 @@ public:
data_height = height;
data_depth = depth;
return get_data();
return data();
}
/* Host memory resize. Only use this if the original data needs to be
@ -314,16 +315,16 @@ public:
size_t new_size = size(width, height, depth);
if(new_size != data_size) {
device_ptr new_ptr = host_alloc(sizeof(T)*new_size);
void *new_ptr = host_alloc(sizeof(T)*new_size);
if(new_size && data_size) {
size_t min_size = ((new_size < data_size)? new_size: data_size);
memcpy((T*)new_ptr, (T*)data_pointer, sizeof(T)*min_size);
memcpy((T*)new_ptr, (T*)host_pointer, sizeof(T)*min_size);
}
device_free();
host_free(data_pointer, sizeof(T)*data_size);
data_pointer = new_ptr;
host_free();
host_pointer = new_ptr;
assert(device_pointer == 0);
}
@ -332,20 +333,20 @@ public:
data_height = height;
data_depth = depth;
return get_data();
return data();
}
/* Take over data from an existing array. */
void steal_data(array<T>& from)
{
device_free();
host_free(data_pointer, sizeof(T)*data_size);
host_free();
data_size = from.size();
data_width = 0;
data_height = 0;
data_depth = 0;
data_pointer = (device_ptr)from.steal_pointer();
host_pointer = from.steal_pointer();
assert(device_pointer == 0);
}
@ -353,13 +354,13 @@ public:
void free()
{
device_free();
host_free(data_pointer, sizeof(T)*data_size);
host_free();
data_size = 0;
data_width = 0;
data_height = 0;
data_depth = 0;
data_pointer = 0;
host_pointer = 0;
assert(device_pointer == 0);
}
@ -368,15 +369,15 @@ public:
return data_size;
}
T* get_data()
T* data()
{
return (T*)data_pointer;
return (T*)host_pointer;
}
T& operator[](size_t i)
{
assert(i < data_size);
return get_data()[i];
return data()[i];
}
void copy_to_device()
@ -423,7 +424,7 @@ public:
T *copy_from_device(int y, int w, int h)
{
device_memory::device_copy_from(y, w, h, sizeof(T));
return device_vector<T>::get_data();
return device_vector<T>::data();
}
};

@ -112,7 +112,7 @@ public:
snd.add(mem);
snd.write();
snd.write_buffer((void*)mem.data_pointer, mem.memory_size());
snd.write_buffer(mem.host_pointer, mem.memory_size());
}
void mem_copy_from(device_memory& mem, int y, int w, int h, int elem)
@ -131,7 +131,7 @@ public:
snd.write();
RPCReceive rcv(socket, &error_func);
rcv.read_buffer((void*)mem.data_pointer, data_size);
rcv.read_buffer(mem.host_pointer, data_size);
}
void mem_zero(device_memory& mem)
@ -439,7 +439,7 @@ protected:
device_ptr client_pointer = mem.device_pointer;
DataVector &data_v = data_vector_insert(client_pointer, data_size);
mem.data_pointer = (data_size)? (device_ptr)&(data_v[0]): 0;
mem.host_pointer = (data_size)? (void*)&(data_v[0]): 0;
/* Perform the allocation on the actual device. */
device->mem_alloc(mem);
@ -459,7 +459,7 @@ protected:
if(client_pointer) {
/* Lookup existing host side data buffer. */
DataVector &data_v = data_vector_find(client_pointer);
mem.data_pointer = (device_ptr)&data_v[0];
mem.host_pointer = (void*)&data_v[0];
/* Translate the client pointer to a real device pointer. */
mem.device_pointer = device_ptr_from_client_pointer(client_pointer);
@ -467,11 +467,11 @@ protected:
else {
/* Allocate host side data buffer. */
DataVector &data_v = data_vector_insert(client_pointer, data_size);
mem.data_pointer = (data_size)? (device_ptr)&(data_v[0]): 0;
mem.host_pointer = (data_size)? (void*)&(data_v[0]): 0;
}
/* Copy data from network into memory buffer. */
rcv.read_buffer((uint8_t*)mem.data_pointer, data_size);
rcv.read_buffer((uint8_t*)mem.host_pointer, data_size);
/* Copy the data from the memory buffer to the device buffer. */
device->mem_copy_to(mem);
@ -497,7 +497,7 @@ protected:
DataVector &data_v = data_vector_find(client_pointer);
mem.data_pointer = (device_ptr)&(data_v[0]);
mem.host_pointer = (void*)&(data_v[0]);
device->mem_copy_from(mem, y, w, h, elem);
@ -505,7 +505,7 @@ protected:
RPCSend snd(socket, &error_func, "mem_copy_from");
snd.write();
snd.write_buffer((uint8_t*)mem.data_pointer, data_size);
snd.write_buffer((uint8_t*)mem.host_pointer, data_size);
lock.unlock();
}
else if(rcv.name == "mem_zero") {
@ -520,7 +520,7 @@ protected:
if(client_pointer) {
/* Lookup existing host side data buffer. */
DataVector &data_v = data_vector_find(client_pointer);
mem.data_pointer = (device_ptr)&data_v[0];
mem.host_pointer = (void*)&data_v[0];
/* Translate the client pointer to a real device pointer. */
mem.device_pointer = device_ptr_from_client_pointer(client_pointer);
@ -528,7 +528,7 @@ protected:
else {
/* Allocate host side data buffer. */
DataVector &data_v = data_vector_insert(client_pointer, data_size);
mem.data_pointer = (data_size)? (device_ptr)&(data_v[0]): 0;
mem.host_pointer = (data_size)? (void*)&(data_v[0]): 0;
}
/* Zero memory. */

@ -278,7 +278,7 @@ public:
*archive & mem.device_pointer;
mem.name = name.c_str();
mem.data_pointer = 0;
mem.host_pointer = 0;
/* Can't transfer OpenGL texture over network. */
if(mem.type == MEM_PIXELS) {

@ -280,8 +280,8 @@ bool DeviceSplitKernel::path_trace(DeviceTask *task,
activeRaysAvailable = false;
for(int rayStateIter = 0; rayStateIter < global_size[0] * global_size[1]; ++rayStateIter) {
if(!IS_STATE(ray_state.get_data(), rayStateIter, RAY_INACTIVE)) {
if(IS_STATE(ray_state.get_data(), rayStateIter, RAY_INVALID)) {
if(!IS_STATE(ray_state.data(), rayStateIter, RAY_INACTIVE)) {
if(IS_STATE(ray_state.data(), rayStateIter, RAY_INVALID)) {
/* Something went wrong, abort to avoid looping endlessly. */
device->set_error("Split kernel error: invalid ray state");
return false;

@ -88,7 +88,7 @@ void MemoryManager::DeviceBuffer::update_device_memory(OpenCLDeviceBase *device)
CL_FALSE,
offset,
allocation->mem->memory_size(),
(void*)allocation->mem->data_pointer,
allocation->mem->host_pointer,
0, NULL, NULL
));
@ -127,7 +127,7 @@ void MemoryManager::DeviceBuffer::update_device_memory(OpenCLDeviceBase *device)
CL_FALSE,
offset,
allocation->mem->memory_size(),
(void*)allocation->mem->data_pointer,
allocation->mem->host_pointer,
0, NULL, NULL
));

@ -362,7 +362,7 @@ void OpenCLDeviceBase::mem_copy_to(device_memory& mem)
CL_TRUE,
0,
size,
(void*)mem.data_pointer,
mem.host_pointer,
0,
NULL, NULL));
}
@ -379,7 +379,7 @@ void OpenCLDeviceBase::mem_copy_from(device_memory& mem, int y, int w, int h, in
CL_TRUE,
offset,
size,
(uchar*)mem.data_pointer + offset,
(uchar*)mem.host_pointer + offset,
0,
NULL, NULL));
}
@ -426,14 +426,14 @@ void OpenCLDeviceBase::mem_zero(device_memory& mem)
mem_zero_kernel(mem.device_pointer, mem.memory_size());
}
if(mem.data_pointer) {
memset((void*)mem.data_pointer, 0, mem.memory_size());
if(mem.host_pointer) {
memset(mem.host_pointer, 0, mem.memory_size());
}
if(!base_program.is_loaded()) {
void* zero = (void*)mem.data_pointer;
void* zero = mem.host_pointer;
if(!mem.data_pointer) {
if(!mem.host_pointer) {
zero = util_aligned_malloc(mem.memory_size(), 16);
memset(zero, 0, mem.memory_size());
}
@ -447,7 +447,7 @@ void OpenCLDeviceBase::mem_zero(device_memory& mem)
0,
NULL, NULL));
if(!mem.data_pointer) {
if(!mem.host_pointer) {
util_aligned_free(zero);
}
}
@ -519,7 +519,7 @@ void OpenCLDeviceBase::const_copy_to(const char *name, void *host, size_t size)
data = i->second;
}
memcpy(data->get_data(), host, size);
memcpy(data->data(), host, size);
data->copy_to_device();
}

@ -20,17 +20,16 @@ ccl_device ShaderClosure *closure_alloc(ShaderData *sd, int size, ClosureType ty
{
kernel_assert(size <= sizeof(ShaderClosure));
int num_closure = sd->num_closure;
int num_closure_extra = sd->num_closure_extra;
if(num_closure + num_closure_extra >= MAX_CLOSURE)
if(sd->num_closure_left == 0)
return NULL;
ShaderClosure *sc = &sd->closure[num_closure];
ShaderClosure *sc = &sd->closure[sd->num_closure];
sc->type = type;
sc->weight = weight;
sd->num_closure++;
sd->num_closure_left--;
return sc;
}
@ -44,18 +43,16 @@ ccl_device ccl_addr_space void *closure_alloc_extra(ShaderData *sd, int size)
* This lets us keep the same fast array iteration over closures, as we
* found linked list iteration and iteration with skipping to be slower. */
int num_extra = ((size + sizeof(ShaderClosure) - 1) / sizeof(ShaderClosure));
int num_closure = sd->num_closure;
int num_closure_extra = sd->num_closure_extra + num_extra;
if(num_closure + num_closure_extra > MAX_CLOSURE) {
if(num_extra > sd->num_closure_left) {
/* Remove previous closure. */
sd->num_closure--;
sd->num_closure_extra++;
sd->num_closure_left++;
return NULL;
}
sd->num_closure_extra = num_closure_extra;
return (ccl_addr_space void*)(sd->closure + MAX_CLOSURE - num_closure_extra);
sd->num_closure_left -= num_extra;
return (ccl_addr_space void*)(sd->closure + sd->num_closure + sd->num_closure_left);
}
ccl_device_inline ShaderClosure *bsdf_alloc(ShaderData *sd, int size, float3 weight)

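A toy model of the closure allocation scheme introduced above (simplified types, not the kernel code): regular closures fill the fixed array from the front, "extra" blocks are carved from the unused tail, and num_closure_left tracks the free slots in between, replacing the old num_closure_extra bookkeeping.

#include <cstdio>

constexpr int MAX_CLOSURE = 8;

struct Closure { int type; };

struct SDSketch {
	Closure closure[MAX_CLOSURE];
	int num_closure = 0;
	int num_closure_left = MAX_CLOSURE;
};

static Closure *closure_alloc(SDSketch *sd, int type)
{
	if(sd->num_closure_left == 0)
		return NULL;

	Closure *sc = &sd->closure[sd->num_closure];
	sc->type = type;
	sd->num_closure++;
	sd->num_closure_left--;
	return sc;
}

static void *closure_alloc_extra(SDSketch *sd, int num_extra)
{
	if(num_extra > sd->num_closure_left) {
		/* Roll back the closure that requested the extra storage. */
		sd->num_closure--;
		sd->num_closure_left++;
		return NULL;
	}
	sd->num_closure_left -= num_extra;
	/* Extra blocks live at the still-free tail of the same array. */
	return (void*)(sd->closure + sd->num_closure + sd->num_closure_left);
}

int main()
{
	SDSketch sd;
	closure_alloc(&sd, 1);        /* slot 0 */
	closure_alloc(&sd, 2);        /* slot 1 */
	closure_alloc_extra(&sd, 2);  /* reserves slots 6 and 7 */
	printf("closures=%d left=%d\n", sd.num_closure, sd.num_closure_left);
	return 0;
}
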
@ -35,10 +35,22 @@
CCL_NAMESPACE_BEGIN
ccl_device int bsdf_transparent_setup(ShaderClosure *sc)
ccl_device void bsdf_transparent_setup(ShaderData *sd, const float3 weight)
{
sc->type = CLOSURE_BSDF_TRANSPARENT_ID;
return SD_BSDF|SD_TRANSPARENT;
if(sd->flag & SD_TRANSPARENT) {
sd->closure_transparent_extinction += weight;
}
else {
sd->flag |= SD_BSDF|SD_TRANSPARENT;
sd->closure_transparent_extinction = weight;
}
ShaderClosure *bsdf = bsdf_alloc(sd, sizeof(ShaderClosure), weight);
if(bsdf) {
bsdf->N = sd->N;
bsdf->type = CLOSURE_BSDF_TRANSPARENT_ID;
}
}
ccl_device float3 bsdf_transparent_eval_reflect(const ShaderClosure *sc, const float3 I, const float3 omega_in, float *pdf)

@ -32,8 +32,32 @@
CCL_NAMESPACE_BEGIN
/* BACKGROUND CLOSURE */
ccl_device void background_setup(ShaderData *sd, const float3 weight)
{
if(sd->flag & SD_EMISSION) {
sd->closure_emission_background += weight;
}
else {
sd->flag |= SD_EMISSION;
sd->closure_emission_background = weight;
}
}
/* EMISSION CLOSURE */
ccl_device void emission_setup(ShaderData *sd, const float3 weight)
{
if(sd->flag & SD_EMISSION) {
sd->closure_emission_background += weight;
}
else {
sd->flag |= SD_EMISSION;
sd->closure_emission_background = weight;
}
}
/* return the probability distribution function in the direction I,
* given the parameters and the light's surface normal. This MUST match
* the PDF computed by sample(). */

@ -19,14 +19,27 @@
CCL_NAMESPACE_BEGIN
/* VOLUME EXTINCTION */
ccl_device void volume_extinction_setup(ShaderData *sd, float3 weight)
{
if(sd->flag & SD_EXTINCTION) {
sd->closure_transparent_extinction += weight;
}
else {
sd->flag |= SD_EXTINCTION;
sd->closure_transparent_extinction = weight;
}
}
/* HENYEY-GREENSTEIN CLOSURE */
typedef ccl_addr_space struct HenyeyGreensteinVolume {
SHADER_CLOSURE_BASE;
float g;
} HenyeyGreensteinVolume;
/* HENYEY-GREENSTEIN CLOSURE */
/* Given cosine between rays, return probability density that a photon bounces
* to that direction. The g parameter controls how different it is from the
* uniform sphere. g=0 uniform diffuse-like, g=1 close to sharp single ray. */
@ -110,15 +123,6 @@ ccl_device int volume_henyey_greenstein_sample(const ShaderClosure *sc, float3 I
return LABEL_VOLUME_SCATTER;
}
/* ABSORPTION VOLUME CLOSURE */
ccl_device int volume_absorption_setup(ShaderClosure *sc)
{
sc->type = CLOSURE_VOLUME_ABSORPTION_ID;
return SD_ABSORPTION;
}
/* VOLUME CLOSURE */
ccl_device float3 volume_phase_eval(const ShaderData *sd, const ShaderClosure *sc, float3 omega_in, float *pdf)

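bsdf_transparent_setup(), background_setup(), emission_setup() and volume_extinction_setup() above all share one set-or-accumulate pattern: the first closure of a kind initializes the summed weight, later ones add to it, so the total survives even when no closure slots are available. A toy reduction of that pattern follows, with a simplified flag and float3, not the kernel code.

#include <cstdio>

enum { SD_EXTINCTION_SKETCH = 1 << 6 };

struct float3 { float x, y, z; };

static float3 operator+(const float3& a, const float3& b)
{
	return {a.x + b.x, a.y + b.y, a.z + b.z};
}

struct SDSketch {
	int flag = 0;
	float3 closure_transparent_extinction = {0.0f, 0.0f, 0.0f};
};

static void volume_extinction_setup_sketch(SDSketch *sd, float3 weight)
{
	if(sd->flag & SD_EXTINCTION_SKETCH) {
		/* Later closures accumulate into the existing sum. */
		sd->closure_transparent_extinction =
		        sd->closure_transparent_extinction + weight;
	}
	else {
		/* First closure of this kind sets the sum. */
		sd->flag |= SD_EXTINCTION_SKETCH;
		sd->closure_transparent_extinction = weight;
	}
}

int main()
{
	SDSketch sd;
	volume_extinction_setup_sketch(&sd, {0.5f, 0.5f, 0.5f});  /* sets */
	volume_extinction_setup_sketch(&sd, {0.2f, 0.2f, 0.2f});  /* accumulates */
	std::printf("sigma_t = %.1f\n", sd.closure_transparent_extinction.x);
	return 0;
}
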
@ -51,14 +51,21 @@ ccl_device_inline AttributeDescriptor attribute_not_found()
/* Find attribute based on ID */
ccl_device_inline uint object_attribute_map_offset(KernelGlobals *kg, int object)
{
int offset = object*OBJECT_SIZE + 11;
float4 f = kernel_tex_fetch(__objects, offset);
return __float_as_uint(f.y);
}
ccl_device_inline AttributeDescriptor find_attribute(KernelGlobals *kg, const ShaderData *sd, uint id)
{
if(sd->object == PRIM_NONE) {
if(sd->object == OBJECT_NONE) {
return attribute_not_found();
}
/* for SVM, find attribute by unique id */
uint attr_offset = sd->object*kernel_data.bvh.attributes_map_stride;
uint attr_offset = object_attribute_map_offset(kg, sd->object);
attr_offset += attribute_primitive_type(kg, sd);
uint4 attr_map = kernel_tex_fetch(__attributes_map, attr_offset);

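geom_attribute.h above replaces the global kernel_data.bvh.attributes_map_stride with a per-object offset stored in the __objects array (slot 11, y component, packed as a float bit pattern). A small standalone illustration of that lookup; OBJECT_SIZE and the data layout here are assumptions made for the example, not the actual kernel tables.

#include <cstdint>
#include <cstdio>
#include <cstring>

constexpr int OBJECT_SIZE = 12;  /* float4 slots per object, illustrative */

struct float4 { float x, y, z, w; };

static uint32_t float_as_uint(float f)
{
	uint32_t u;
	std::memcpy(&u, &f, sizeof(u));  /* analogue of __float_as_uint() */
	return u;
}

static uint32_t object_attribute_map_offset(const float4 *objects, int object)
{
	float4 f = objects[object * OBJECT_SIZE + 11];
	return float_as_uint(f.y);
}

int main()
{
	float4 objects[2 * OBJECT_SIZE] = {};

	/* Pretend object 1 stores attribute-map offset 40 in slot 11, component y. */
	uint32_t offset = 40;
	std::memcpy(&objects[1 * OBJECT_SIZE + 11].y, &offset, sizeof(offset));

	std::printf("offset = %u\n", object_attribute_map_offset(objects, 1));
	return 0;
}
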
@ -33,7 +33,7 @@ ccl_device_inline int find_attribute_curve_motion(KernelGlobals *kg, int object,
* zero iterations and rendering is really slow with motion curves. For until other
* areas are speed up it's probably not so crucial to optimize this out.
*/
uint attr_offset = object*kernel_data.bvh.attributes_map_stride + ATTR_PRIM_CURVE;
uint attr_offset = object_attribute_map_offset(kg, object) + ATTR_PRIM_CURVE;
uint4 attr_map = kernel_tex_fetch(__attributes_map, attr_offset);
while(attr_map.x != id) {

@ -32,7 +32,7 @@ CCL_NAMESPACE_BEGIN
ccl_device_inline int find_attribute_motion(KernelGlobals *kg, int object, uint id, AttributeElement *elem)
{
/* todo: find a better (faster) solution for this, maybe store offset per object */
uint attr_offset = object*kernel_data.bvh.attributes_map_stride;
uint attr_offset = object_attribute_map_offset(kg, object);
uint4 attr_map = kernel_tex_fetch(__attributes_map, attr_offset);
while(attr_map.x != id) {

@ -40,7 +40,7 @@ bool kernel_osl_use(KernelGlobals *kg);
void kernel_const_copy(KernelGlobals *kg, const char *name, void *host, size_t size);
void kernel_tex_copy(KernelGlobals *kg,
const char *name,
device_ptr mem,
void *mem,
size_t size);
#define KERNEL_ARCH cpu

@ -51,7 +51,7 @@ ccl_device_inline void compute_light_pass(KernelGlobals *kg,
path_state_init(kg, &emission_sd, &state, rng_hash, sample, NULL);
/* evaluate surface shader */
shader_eval_surface(kg, sd, &state, state.flag);
shader_eval_surface(kg, sd, &state, state.flag, MAX_CLOSURE);
/* TODO, disable more closures we don't need besides transparent */
shader_bsdf_disable_transparency(kg, sd);
@ -239,12 +239,12 @@ ccl_device float3 kernel_bake_evaluate_direct_indirect(KernelGlobals *kg,
}
else {
/* surface color of the pass only */
shader_eval_surface(kg, sd, state, 0);
shader_eval_surface(kg, sd, state, 0, MAX_CLOSURE);
return kernel_bake_shader_bsdf(kg, sd, type);
}
}
else {
shader_eval_surface(kg, sd, state, 0);
shader_eval_surface(kg, sd, state, 0, MAX_CLOSURE);
color = kernel_bake_shader_bsdf(kg, sd, type);
}
@ -337,7 +337,7 @@ ccl_device void kernel_bake_evaluate(KernelGlobals *kg, ccl_global uint4 *input,
{
float3 N = sd.N;
if((sd.flag & SD_HAS_BUMP)) {
shader_eval_surface(kg, &sd, &state, 0);
shader_eval_surface(kg, &sd, &state, 0, MAX_CLOSURE);
N = shader_bsdf_average_normal(kg, &sd);
}
@ -352,7 +352,7 @@ ccl_device void kernel_bake_evaluate(KernelGlobals *kg, ccl_global uint4 *input,
}
case SHADER_EVAL_EMISSION:
{
shader_eval_surface(kg, &sd, &state, 0);
shader_eval_surface(kg, &sd, &state, 0, 0);
out = shader_emissive_eval(kg, &sd);
break;
}

@ -70,14 +70,11 @@ ccl_device_noinline float3 direct_emissive_eval(KernelGlobals *kg,
/* no path flag, we're evaluating this for all closures. that's weak but
* we'd have to do multiple evaluations otherwise */
path_state_modify_bounce(state, true);
shader_eval_surface(kg, emission_sd, state, 0);
shader_eval_surface(kg, emission_sd, state, 0, 0);
path_state_modify_bounce(state, false);
/* evaluate emissive closure */
if(emission_sd->flag & SD_EMISSION)
eval = shader_emissive_eval(kg, emission_sd);
else
eval = make_float3(0.0f, 0.0f, 0.0f);
}
eval *= ls->eval_fac;

@ -132,7 +132,7 @@ ccl_device_forceinline void kernel_path_background(
ccl_addr_space PathState *state,
ccl_addr_space Ray *ray,
float3 throughput,
ShaderData *emission_sd,
ShaderData *sd,
PathRadiance *L)
{
/* eval background shader if nothing hit */
@ -153,7 +153,7 @@ ccl_device_forceinline void kernel_path_background(
#ifdef __BACKGROUND__
/* sample background shader */
float3 L_background = indirect_background(kg, emission_sd, state, ray);
float3 L_background = indirect_background(kg, sd, state, ray);
path_radiance_accum_background(L, state, throughput, L_background);
#endif /* __BACKGROUND__ */
}
@ -407,7 +407,7 @@ ccl_device void kernel_path_indirect(KernelGlobals *kg,
bool hit = kernel_path_scene_intersect(kg, state, ray, &isect, L);
/* Find intersection with lamps and compute emission for MIS. */
kernel_path_lamp_emission(kg, state, ray, throughput, &isect, emission_sd, L);
kernel_path_lamp_emission(kg, state, ray, throughput, &isect, sd, L);
#ifdef __VOLUME__
/* Volume integration. */
@ -431,7 +431,7 @@ ccl_device void kernel_path_indirect(KernelGlobals *kg,
/* Shade background. */
if(!hit) {
kernel_path_background(kg, state, ray, throughput, emission_sd, L);
kernel_path_background(kg, state, ray, throughput, sd, L);
break;
}
else if(path_state_ao_bounce(kg, state)) {
@ -443,7 +443,7 @@ ccl_device void kernel_path_indirect(KernelGlobals *kg,
sd,
&isect,
ray);
shader_eval_surface(kg, sd, state, state->flag);
shader_eval_surface(kg, sd, state, state->flag, MAX_CLOSURE);
shader_prepare_closures(sd, state);
/* Apply shadow catcher, holdout, emission. */
@ -561,7 +561,7 @@ ccl_device_forceinline void kernel_path_integrate(
bool hit = kernel_path_scene_intersect(kg, state, ray, &isect, L);
/* Find intersection with lamps and compute emission for MIS. */
kernel_path_lamp_emission(kg, state, ray, throughput, &isect, emission_sd, L);
kernel_path_lamp_emission(kg, state, ray, throughput, &isect, &sd, L);
#ifdef __VOLUME__
/* Volume integration. */
@ -585,7 +585,7 @@ ccl_device_forceinline void kernel_path_integrate(
/* Shade background. */
if(!hit) {
kernel_path_background(kg, state, ray, throughput, emission_sd, L);
kernel_path_background(kg, state, ray, throughput, &sd, L);
break;
}
else if(path_state_ao_bounce(kg, state)) {
@ -594,7 +594,7 @@ ccl_device_forceinline void kernel_path_integrate(
/* Setup and evaluate shader. */
shader_setup_from_ray(kg, &sd, &isect, ray);
shader_eval_surface(kg, &sd, state, state->flag);
shader_eval_surface(kg, &sd, state, state->flag, MAX_CLOSURE);
shader_prepare_closures(&sd, state);
/* Apply shadow catcher, holdout, emission. */
@ -706,9 +706,11 @@ ccl_device void kernel_path_trace(KernelGlobals *kg,
PathRadiance L;
path_radiance_init(&L, kernel_data.film.use_light_pass);
ShaderData emission_sd;
ShaderDataTinyStorage emission_sd_storage;
ShaderData *emission_sd = AS_SHADER_DATA(&emission_sd_storage);
PathState state;
path_state_init(kg, &emission_sd, &state, rng_hash, sample, &ray);
path_state_init(kg, emission_sd, &state, rng_hash, sample, &ray);
/* Integrate. */
kernel_path_integrate(kg,
@ -717,7 +719,7 @@ ccl_device void kernel_path_trace(KernelGlobals *kg,
&ray,
&L,
buffer,
&emission_sd);
emission_sd);
kernel_write_result(kg, buffer, sample, &L);
}

@ -436,10 +436,12 @@ ccl_device void kernel_branched_path_integrate(KernelGlobals *kg,
/* shader data memory used for both volumes and surfaces, saves stack space */
ShaderData sd;
/* shader data used by emission, shadows, volume stacks, indirect path */
ShaderData emission_sd, indirect_sd;
ShaderDataTinyStorage emission_sd_storage;
ShaderData *emission_sd = AS_SHADER_DATA(&emission_sd_storage);
ShaderData indirect_sd;
PathState state;
path_state_init(kg, &emission_sd, &state, rng_hash, sample, &ray);
path_state_init(kg, emission_sd, &state, rng_hash, sample, &ray);
/* Main Loop
* Here we only handle transparency intersections from the camera ray.
@ -460,19 +462,19 @@ ccl_device void kernel_branched_path_integrate(KernelGlobals *kg,
&isect,
hit,
&indirect_sd,
&emission_sd,
emission_sd,
L);
#endif /* __VOLUME__ */
/* Shade background. */
if(!hit) {
kernel_path_background(kg, &state, &ray, throughput, &emission_sd, L);
kernel_path_background(kg, &state, &ray, throughput, &sd, L);
break;
}
/* Setup and evaluate shader. */
shader_setup_from_ray(kg, &sd, &isect, &ray);
shader_eval_surface(kg, &sd, &state, state.flag);
shader_eval_surface(kg, &sd, &state, state.flag, MAX_CLOSURE);
shader_merge_closures(&sd);
/* Apply shadow catcher, holdout, emission. */
@ -481,7 +483,7 @@ ccl_device void kernel_branched_path_integrate(KernelGlobals *kg,
&state,
&ray,
throughput,
&emission_sd,
emission_sd,
L,
buffer))
{
@ -513,14 +515,14 @@ ccl_device void kernel_branched_path_integrate(KernelGlobals *kg,
#ifdef __AO__
/* ambient occlusion */
if(kernel_data.integrator.use_ambient_occlusion || (sd.flag & SD_AO)) {
kernel_branched_path_ao(kg, &sd, &emission_sd, L, &state, throughput);
kernel_branched_path_ao(kg, &sd, emission_sd, L, &state, throughput);
}
#endif /* __AO__ */
#ifdef __SUBSURFACE__
/* bssrdf scatter to a different location on the same object */
if(sd.flag & SD_BSSRDF) {
kernel_branched_path_subsurface_scatter(kg, &sd, &indirect_sd, &emission_sd,
kernel_branched_path_subsurface_scatter(kg, &sd, &indirect_sd, emission_sd,
L, &state, &ray, throughput);
}
#endif /* __SUBSURFACE__ */
@ -534,13 +536,13 @@ ccl_device void kernel_branched_path_integrate(KernelGlobals *kg,
int all = (kernel_data.integrator.sample_all_lights_direct) ||
(state.flag & PATH_RAY_SHADOW_CATCHER);
kernel_branched_path_surface_connect_light(kg,
&sd, &emission_sd, &hit_state, throughput, 1.0f, L, all);
&sd, emission_sd, &hit_state, throughput, 1.0f, L, all);
}
#endif /* __EMISSION__ */
/* indirect light */
kernel_branched_path_surface_indirect_light(kg,
&sd, &indirect_sd, &emission_sd, throughput, 1.0f, &hit_state, L);
&sd, &indirect_sd, emission_sd, throughput, 1.0f, &hit_state, L);
/* continue in case of transparency */
throughput *= shader_bsdf_transparency(kg, &sd);

@ -764,23 +764,20 @@ ccl_device void shader_bsdf_blur(KernelGlobals *kg, ShaderData *sd, float roughn
ccl_device float3 shader_bsdf_transparency(KernelGlobals *kg, const ShaderData *sd)
{
if(sd->flag & SD_HAS_ONLY_VOLUME)
if(sd->flag & SD_HAS_ONLY_VOLUME) {
return make_float3(1.0f, 1.0f, 1.0f);
float3 eval = make_float3(0.0f, 0.0f, 0.0f);
for(int i = 0; i < sd->num_closure; i++) {
const ShaderClosure *sc = &sd->closure[i];
if(sc->type == CLOSURE_BSDF_TRANSPARENT_ID) // todo: make this work for osl
eval += sc->weight;
}
return eval;
else if(sd->flag & SD_TRANSPARENT) {
return sd->closure_transparent_extinction;
}
else {
return make_float3(0.0f, 0.0f, 0.0f);
}
}
ccl_device void shader_bsdf_disable_transparency(KernelGlobals *kg, ShaderData *sd)
{
if(sd->flag & SD_TRANSPARENT) {
for(int i = 0; i < sd->num_closure; i++) {
ShaderClosure *sc = &sd->closure[i];
@ -789,6 +786,9 @@ ccl_device void shader_bsdf_disable_transparency(KernelGlobals *kg, ShaderData *
sc->weight = make_float3(0.0f, 0.0f, 0.0f);
}
}
sd->flag &= ~SD_TRANSPARENT;
}
}
ccl_device float3 shader_bsdf_alpha(KernelGlobals *kg, ShaderData *sd)
@ -926,24 +926,14 @@ ccl_device float3 shader_bssrdf_sum(ShaderData *sd, float3 *N_, float *texture_b
/* Emission */
ccl_device float3 emissive_eval(KernelGlobals *kg, ShaderData *sd, ShaderClosure *sc)
{
return emissive_simple_eval(sd->Ng, sd->I);
}
ccl_device float3 shader_emissive_eval(KernelGlobals *kg, ShaderData *sd)
{
float3 eval;
eval = make_float3(0.0f, 0.0f, 0.0f);
for(int i = 0; i < sd->num_closure; i++) {
ShaderClosure *sc = &sd->closure[i];
if(CLOSURE_IS_EMISSION(sc->type))
eval += emissive_eval(kg, sd, sc)*sc->weight;
if(sd->flag & SD_EMISSION) {
return emissive_simple_eval(sd->Ng, sd->I) * sd->closure_emission_background;
}
else {
return make_float3(0.0f, 0.0f, 0.0f);
}
return eval;
}
/* Holdout */
@ -965,10 +955,10 @@ ccl_device float3 shader_holdout_eval(KernelGlobals *kg, ShaderData *sd)
/* Surface Evaluation */
ccl_device void shader_eval_surface(KernelGlobals *kg, ShaderData *sd,
ccl_addr_space PathState *state, int path_flag)
ccl_addr_space PathState *state, int path_flag, int max_closure)
{
sd->num_closure = 0;
sd->num_closure_extra = 0;
sd->num_closure_left = max_closure;
#ifdef __OSL__
if(kg->osl)
@ -998,7 +988,7 @@ ccl_device float3 shader_eval_background(KernelGlobals *kg, ShaderData *sd,
ccl_addr_space PathState *state, int path_flag)
{
sd->num_closure = 0;
sd->num_closure_extra = 0;
sd->num_closure_left = 0;
#ifdef __SVM__
# ifdef __OSL__
@ -1011,16 +1001,12 @@ ccl_device float3 shader_eval_background(KernelGlobals *kg, ShaderData *sd,
svm_eval_nodes(kg, sd, state, SHADER_TYPE_SURFACE, path_flag);
}
float3 eval = make_float3(0.0f, 0.0f, 0.0f);
for(int i = 0; i < sd->num_closure; i++) {
const ShaderClosure *sc = &sd->closure[i];
if(CLOSURE_IS_BACKGROUND(sc->type))
eval += sc->weight;
if(sd->flag & SD_EMISSION) {
return sd->closure_emission_background;
}
else {
return make_float3(0.0f, 0.0f, 0.0f);
}
return eval;
#else /* __SVM__ */
return make_float3(0.8f, 0.8f, 0.8f);
#endif /* __SVM__ */
@ -1143,12 +1129,13 @@ ccl_device_inline void shader_eval_volume(KernelGlobals *kg,
ShaderData *sd,
ccl_addr_space PathState *state,
ccl_addr_space VolumeStack *stack,
int path_flag)
int path_flag,
int max_closure)
{
/* reset closures once at the start, we will be accumulating the closures
* for all volumes in the stack into a single array of closures */
sd->num_closure = 0;
sd->num_closure_extra = 0;
sd->num_closure_left = max_closure;
sd->flag = 0;
sd->object_flag = 0;
@ -1198,7 +1185,7 @@ ccl_device_inline void shader_eval_volume(KernelGlobals *kg,
ccl_device void shader_eval_displacement(KernelGlobals *kg, ShaderData *sd, ccl_addr_space PathState *state)
{
sd->num_closure = 0;
sd->num_closure_extra = 0;
sd->num_closure_left = 0;
/* this will modify sd->P */
#ifdef __SVM__

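The kernel_shader.h changes above are what make the new max_closure argument useful: emission, background and shadow transparency can now be evaluated with max_closure == 0, because the relevant weights are summed directly into ShaderData instead of being recovered by looping over sd->closure[]. A toy model of that evaluation path follows; the flag bits and types are illustrative, and the geometric term emissive_simple_eval(Ng, I) is omitted.

#include <cstdio>

enum { SD_EMISSION = 1 << 1, SD_TRANSPARENT = 1 << 9 };  /* bit values illustrative */

struct float3 { float x, y, z; };

struct SDSketch {
	int flag = 0;
	float3 closure_emission_background = {0.0f, 0.0f, 0.0f};
	float3 closure_transparent_extinction = {0.0f, 0.0f, 0.0f};
};

static float3 shader_emissive_eval_sketch(const SDSketch& sd)
{
	/* No loop over closures: just the summed weight (times the geometric
	 * term in the real kernel). */
	if(sd.flag & SD_EMISSION)
		return sd.closure_emission_background;
	return {0.0f, 0.0f, 0.0f};
}

static float3 shader_bsdf_transparency_sketch(const SDSketch& sd)
{
	if(sd.flag & SD_TRANSPARENT)
		return sd.closure_transparent_extinction;
	return {0.0f, 0.0f, 0.0f};
}

int main()
{
	SDSketch sd;
	/* What emission_setup()/bsdf_transparent_setup() would have filled in,
	 * even with zero closure slots available. */
	sd.flag |= SD_EMISSION | SD_TRANSPARENT;
	sd.closure_emission_background = {2.0f, 2.0f, 2.0f};
	sd.closure_transparent_extinction = {0.4f, 0.4f, 0.4f};

	float3 e = shader_emissive_eval_sketch(sd);
	float3 t = shader_bsdf_transparency_sketch(sd);
	std::printf("emission %.1f, transparency %.1f\n", e.x, t.x);
	return 0;
}
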
@ -86,7 +86,8 @@ ccl_device_forceinline bool shadow_handle_transparent_isect(
shader_eval_surface(kg,
shadow_sd,
state,
PATH_RAY_SHADOW);
PATH_RAY_SHADOW,
0);
path_state_modify_bounce(state, false);
*throughput *= shader_bsdf_transparency(kg, shadow_sd);
}

@ -80,7 +80,7 @@ ccl_device void subsurface_scatter_setup_diffuse_bsdf(ShaderData *sd, const Shad
{
sd->flag &= ~SD_CLOSURE_FLAGS;
sd->num_closure = 0;
sd->num_closure_extra = 0;
sd->num_closure_left = MAX_CLOSURE;
if(hit) {
Bssrdf *bssrdf = (Bssrdf *)sc;
@ -154,7 +154,7 @@ ccl_device void subsurface_color_bump_blur(KernelGlobals *kg,
if(bump || texture_blur > 0.0f) {
/* average color and normal at incoming point */
shader_eval_surface(kg, sd, state, state_flag);
shader_eval_surface(kg, sd, state, state_flag, MAX_CLOSURE);
float3 in_color = shader_bssrdf_sum(sd, (bump)? N: NULL, NULL);
/* we simply divide out the average color and multiply with the average

@ -812,7 +812,7 @@ enum ShaderDataFlag {
/* Set when ray hits backside of surface. */
SD_BACKFACING = (1 << 0),
/* Shader has emissive closure. */
/* Shader has non-zero emission. */
SD_EMISSION = (1 << 1),
/* Shader has BSDF closure. */
SD_BSDF = (1 << 2),
@ -822,8 +822,8 @@ enum ShaderDataFlag {
SD_BSSRDF = (1 << 4),
/* Shader has holdout closure. */
SD_HOLDOUT = (1 << 5),
/* Shader has volume absorption closure. */
SD_ABSORPTION = (1 << 6),
/* Shader has non-zero volume extinction. */
SD_EXTINCTION = (1 << 6),
/* Shader has have volume phase (scatter) closure. */
SD_SCATTER = (1 << 7),
/* Shader has AO closure. */
@ -838,7 +838,7 @@ enum ShaderDataFlag {
SD_BSDF_HAS_EVAL |
SD_BSSRDF |
SD_HOLDOUT |
SD_ABSORPTION |
SD_EXTINCTION |
SD_SCATTER |
SD_AO |
SD_BSDF_NEEDS_LCG),
@ -970,16 +970,6 @@ typedef ccl_addr_space struct ShaderData {
Transform ob_itfm;
#endif
/* Closure data, we store a fixed array of closures */
struct ShaderClosure closure[MAX_CLOSURE];
int num_closure;
int num_closure_extra;
float randb_closure;
float3 svm_closure_weight;
/* LCG state for closures that require additional random numbers. */
uint lcg_state;
/* ray start position, only set for backgrounds */
float3 ray_P;
differential3 ray_dP;
@ -988,8 +978,30 @@ typedef ccl_addr_space struct ShaderData {
struct KernelGlobals *osl_globals;
struct PathState *osl_path_state;
#endif
/* LCG state for closures that require additional random numbers. */
uint lcg_state;
/* Closure data, we store a fixed array of closures */
int num_closure;
int num_closure_left;
float randb_closure;
float3 svm_closure_weight;
/* Closure weights summed directly, so we can evaluate
* emission and shadow transparency with MAX_CLOSURE 0. */
float3 closure_emission_background;
float3 closure_transparent_extinction;
/* At the end so we can adjust size in ShaderDataTinyStorage. */
struct ShaderClosure closure[MAX_CLOSURE];
} ShaderData;
typedef ccl_addr_space struct ShaderDataTinyStorage {
char pad[sizeof(ShaderData) - sizeof(ShaderClosure) * MAX_CLOSURE];
} ShaderDataTinyStorage;
#define AS_SHADER_DATA(shader_data_tiny_storage) ((ShaderData*)shader_data_tiny_storage)
/* Path State */
#ifdef __VOLUME__
@ -1295,13 +1307,12 @@ static_assert_align(KernelIntegrator, 16);
typedef struct KernelBVH {
/* root node */
int root;
int attributes_map_stride;
int have_motion;
int have_curves;
int have_instancing;
int use_qbvh;
int use_bvh_steps;
int pad1;
int pad1, pad2;
} KernelBVH;
static_assert_align(KernelBVH, 16);

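A toy model of the ShaderDataTinyStorage trick added in kernel_types.h above (simplified fields, not the real structs): because the closure array now sits at the end of ShaderData, a storage type of sizeof(ShaderData) minus the closure array can be aliased to a ShaderData that is only ever used with max_closure == 0, saving stack in the emission/shadow paths.

#include <cstddef>

constexpr int MAX_CLOSURE = 64;

struct ShaderClosure { float weight[4]; };

struct ShaderData {
	int num_closure;
	int num_closure_left;
	float closure_emission_background[3];
	float closure_transparent_extinction[3];
	/* At the end so the tiny storage can cut it off. */
	ShaderClosure closure[MAX_CLOSURE];
};

struct ShaderDataTinyStorage {
	char pad[sizeof(ShaderData) - sizeof(ShaderClosure) * MAX_CLOSURE];
};

#define AS_SHADER_DATA(shader_data_tiny_storage) ((ShaderData*)(shader_data_tiny_storage))

static_assert(sizeof(ShaderDataTinyStorage) < sizeof(ShaderData),
              "tiny storage must be smaller than the full ShaderData");

int main()
{
	ShaderDataTinyStorage emission_sd_storage;
	ShaderData *emission_sd = AS_SHADER_DATA(&emission_sd_storage);

	/* Only safe because code using emission_sd never touches sd->closure[]:
	 * it runs with max_closure == 0 and reads the summed weights instead. */
	emission_sd->num_closure = 0;
	emission_sd->num_closure_left = 0;
	return 0;
}
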
@ -30,7 +30,7 @@ typedef enum VolumeIntegrateResult {
* sigma_t = sigma_a + sigma_s */
typedef struct VolumeShaderCoefficients {
float3 sigma_a;
float3 sigma_t;
float3 sigma_s;
float3 emission;
} VolumeShaderCoefficients;
@ -43,23 +43,16 @@ ccl_device_inline bool volume_shader_extinction_sample(KernelGlobals *kg,
float3 *extinction)
{
sd->P = P;
shader_eval_volume(kg, sd, state, state->volume_stack, PATH_RAY_SHADOW);
shader_eval_volume(kg, sd, state, state->volume_stack, PATH_RAY_SHADOW, 0);
if(!(sd->flag & (SD_ABSORPTION|SD_SCATTER)))
return false;
float3 sigma_t = make_float3(0.0f, 0.0f, 0.0f);
for(int i = 0; i < sd->num_closure; i++) {
const ShaderClosure *sc = &sd->closure[i];
if(CLOSURE_IS_VOLUME(sc->type))
sigma_t += sc->weight;
}
*extinction = sigma_t;
if(sd->flag & SD_EXTINCTION) {
*extinction = sd->closure_transparent_extinction;
return true;
}
else {
return false;
}
}
/* evaluate shader to get absorption, scattering and emission at P */
ccl_device_inline bool volume_shader_sample(KernelGlobals *kg,
@ -69,33 +62,29 @@ ccl_device_inline bool volume_shader_sample(KernelGlobals *kg,
VolumeShaderCoefficients *coeff)
{
sd->P = P;
shader_eval_volume(kg, sd, state, state->volume_stack, state->flag);
shader_eval_volume(kg, sd, state, state->volume_stack, state->flag, MAX_CLOSURE);
if(!(sd->flag & (SD_ABSORPTION|SD_SCATTER|SD_EMISSION)))
if(!(sd->flag & (SD_EXTINCTION|SD_SCATTER|SD_EMISSION)))
return false;
coeff->sigma_a = make_float3(0.0f, 0.0f, 0.0f);
coeff->sigma_s = make_float3(0.0f, 0.0f, 0.0f);
coeff->emission = make_float3(0.0f, 0.0f, 0.0f);
coeff->sigma_t = (sd->flag & SD_EXTINCTION)? sd->closure_transparent_extinction:
make_float3(0.0f, 0.0f, 0.0f);
coeff->emission = (sd->flag & SD_EMISSION)? sd->closure_emission_background:
make_float3(0.0f, 0.0f, 0.0f);
if(sd->flag & SD_SCATTER) {
if(state->volume_bounce < kernel_data.integrator.max_volume_bounce) {
for(int i = 0; i < sd->num_closure; i++) {
const ShaderClosure *sc = &sd->closure[i];
if(sc->type == CLOSURE_VOLUME_ABSORPTION_ID)
coeff->sigma_a += sc->weight;
else if(sc->type == CLOSURE_EMISSION_ID)
coeff->emission += sc->weight;
else if(CLOSURE_IS_VOLUME(sc->type))
if(CLOSURE_IS_VOLUME(sc->type))
coeff->sigma_s += sc->weight;
}
/* when at the max number of bounces, treat scattering as absorption */
if(sd->flag & SD_SCATTER) {
if(state->volume_bounce >= kernel_data.integrator.max_volume_bounce) {
coeff->sigma_a += coeff->sigma_s;
coeff->sigma_s = make_float3(0.0f, 0.0f, 0.0f);
}
else {
/* When at the max number of bounces, clear scattering. */
sd->flag &= ~SD_SCATTER;
sd->flag |= SD_ABSORPTION;
}
}
@ -336,8 +325,8 @@ ccl_device float3 kernel_volume_emission_integrate(VolumeShaderCoefficients *coe
* todo: we should use an epsilon to avoid precision issues near zero sigma_t */
float3 emission = coeff->emission;
if(closure_flag & SD_ABSORPTION) {
float3 sigma_t = coeff->sigma_a + coeff->sigma_s;
if(closure_flag & SD_EXTINCTION) {
float3 sigma_t = coeff->sigma_t;
emission.x *= (sigma_t.x > 0.0f)? (1.0f - transmittance.x)/sigma_t.x: t;
emission.y *= (sigma_t.y > 0.0f)? (1.0f - transmittance.y)/sigma_t.y: t;
@ -375,7 +364,7 @@ ccl_device VolumeIntegrateResult kernel_volume_integrate_homogeneous(
/* randomly scatter, and if we do t is shortened */
if(closure_flag & SD_SCATTER) {
/* extinction coefficient */
float3 sigma_t = coeff.sigma_a + coeff.sigma_s;
float3 sigma_t = coeff.sigma_t;
/* pick random color channel, we use the Veach one-sample
* model with balance heuristic for the channels */
@ -426,22 +415,22 @@ ccl_device VolumeIntegrateResult kernel_volume_integrate_homogeneous(
}
else
#endif
if(closure_flag & SD_ABSORPTION) {
if(closure_flag & SD_EXTINCTION) {
/* absorption only, no sampling needed */
float3 transmittance = volume_color_transmittance(coeff.sigma_a, t);
float3 transmittance = volume_color_transmittance(coeff.sigma_t, t);
new_tp = *throughput * transmittance;
}
/* integrate emission attenuated by extinction */
if(L && (closure_flag & SD_EMISSION)) {
float3 sigma_t = coeff.sigma_a + coeff.sigma_s;
float3 sigma_t = coeff.sigma_t;
float3 transmittance = volume_color_transmittance(sigma_t, ray->t);
float3 emission = kernel_volume_emission_integrate(&coeff, closure_flag, transmittance, ray->t);
path_radiance_accum_emission(L, state, *throughput, emission);
}
/* modify throughput */
if(closure_flag & (SD_ABSORPTION|SD_SCATTER)) {
if(closure_flag & SD_EXTINCTION) {
*throughput = new_tp;
/* prepare to scatter to new direction */
@ -508,10 +497,10 @@ ccl_device VolumeIntegrateResult kernel_volume_integrate_heterogeneous_distance(
/* distance sampling */
#ifdef __VOLUME_SCATTER__
if((closure_flag & SD_SCATTER) || (has_scatter && (closure_flag & SD_ABSORPTION))) {
if((closure_flag & SD_SCATTER) || (has_scatter && (closure_flag & SD_EXTINCTION))) {
has_scatter = true;
float3 sigma_t = coeff.sigma_a + coeff.sigma_s;
float3 sigma_t = coeff.sigma_t;
float3 sigma_s = coeff.sigma_s;
/* compute transmittance over full step */
@ -545,11 +534,9 @@ ccl_device VolumeIntegrateResult kernel_volume_integrate_heterogeneous_distance(
}
else
#endif
if(closure_flag & SD_ABSORPTION) {
if(closure_flag & SD_EXTINCTION) {
/* absorption only, no sampling needed */
float3 sigma_a = coeff.sigma_a;
transmittance = volume_color_transmittance(sigma_a, dt);
transmittance = volume_color_transmittance(coeff.sigma_t, dt);
new_tp = tp * transmittance;
}
@ -560,7 +547,7 @@ ccl_device VolumeIntegrateResult kernel_volume_integrate_heterogeneous_distance(
}
/* modify throughput */
if(closure_flag & (SD_ABSORPTION|SD_SCATTER)) {
if(closure_flag & SD_EXTINCTION) {
tp = new_tp;
/* stop if nearly all light blocked */
@ -735,7 +722,7 @@ ccl_device void kernel_volume_decoupled_record(KernelGlobals *kg, PathState *sta
/* compute segment */
if(volume_shader_sample(kg, sd, state, new_P, &coeff)) {
int closure_flag = sd->flag;
float3 sigma_t = coeff.sigma_a + coeff.sigma_s;
float3 sigma_t = coeff.sigma_t;
/* compute accumulated transmittance */
float3 transmittance = volume_color_transmittance(sigma_t, dt);

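A scalar worked example of the coefficient change in kernel_volume.h above: VolumeShaderCoefficients now carries the extinction sigma_t directly (accumulated in closure_transparent_extinction) instead of sigma_a, and absorption is implicit as sigma_t - sigma_s. The numbers below are made up for illustration.

#include <cmath>
#include <cstdio>

int main()
{
	/* Per-channel float3 values collapsed to scalars for brevity. */
	float sigma_s = 0.3f;  /* scatter, summed from the volume phase closures */
	float sigma_t = 0.8f;  /* extinction, from closure_transparent_extinction */
	float sigma_a = sigma_t - sigma_s;  /* absorption, no longer stored explicitly */

	float t = 2.0f;  /* ray segment length */
	float transmittance = std::exp(-sigma_t * t);  /* volume_color_transmittance() analogue */

	std::printf("sigma_a=%.2f transmittance=%.3f\n", sigma_a, transmittance);
	return 0;
}
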
@ -74,7 +74,7 @@ void kernel_const_copy(KernelGlobals *kg, const char *name, void *host, size_t s
void kernel_tex_copy(KernelGlobals *kg,
const char *name,
device_ptr mem,
void *mem,
size_t size)
{
if(0) {

@ -38,6 +38,7 @@
#include "kernel/kernel_compat_cpu.h"
#include "kernel/closure/alloc.h"
#include "kernel/closure/emissive.h"
CCL_NAMESPACE_BEGIN
@ -53,7 +54,7 @@ class GenericBackgroundClosure : public CClosurePrimitive {
public:
void setup(ShaderData *sd, int /* path_flag */, float3 weight)
{
closure_alloc(sd, sizeof(ShaderClosure), CLOSURE_BACKGROUND_ID, weight);
background_setup(sd, weight);
}
};

@ -56,8 +56,7 @@ class GenericEmissiveClosure : public CClosurePrimitive {
public:
void setup(ShaderData *sd, int /* path_flag */, float3 weight)
{
closure_alloc(sd, sizeof(ShaderClosure), CLOSURE_EMISSION_ID, weight);
sd->flag |= SD_EMISSION;
emission_setup(sd, weight);
}
};

@ -92,9 +92,6 @@ BSDF_CLOSURE_CLASS_BEGIN(Refraction, refraction, MicrofacetBsdf, LABEL_SINGULAR)
CLOSURE_FLOAT_PARAM(RefractionClosure, params.ior),
BSDF_CLOSURE_CLASS_END(Refraction, refraction)
BSDF_CLOSURE_CLASS_BEGIN(Transparent, transparent, ShaderClosure, LABEL_SINGULAR)
BSDF_CLOSURE_CLASS_END(Transparent, transparent)
BSDF_CLOSURE_CLASS_BEGIN(AshikhminVelvet, ashikhmin_velvet, VelvetBsdf, LABEL_DIFFUSE)
CLOSURE_FLOAT3_PARAM(AshikhminVelvetClosure, params.N),
CLOSURE_FLOAT_PARAM(AshikhminVelvetClosure, params.sigma),
@ -171,13 +168,6 @@ BSDF_CLOSURE_CLASS_BEGIN(HairTransmission, hair_transmission, HairBsdf, LABEL_GL
CLOSURE_FLOAT_PARAM(HairReflectionClosure, params.offset),
BSDF_CLOSURE_CLASS_END(HairTransmission, hair_transmission)
VOLUME_CLOSURE_CLASS_BEGIN(VolumeHenyeyGreenstein, henyey_greenstein, HenyeyGreensteinVolume, LABEL_VOLUME_SCATTER)
CLOSURE_FLOAT_PARAM(VolumeHenyeyGreensteinClosure, params.g),
VOLUME_CLOSURE_CLASS_END(VolumeHenyeyGreenstein, henyey_greenstein)
VOLUME_CLOSURE_CLASS_BEGIN(VolumeAbsorption, absorption, ShaderClosure, LABEL_SINGULAR)
VOLUME_CLOSURE_CLASS_END(VolumeAbsorption, absorption)
BSDF_CLOSURE_CLASS_BEGIN(PrincipledDiffuse, principled_diffuse, PrincipledDiffuseBsdf, LABEL_DIFFUSE)
CLOSURE_FLOAT3_PARAM(PrincipledDiffuseClosure, params.N),
CLOSURE_FLOAT_PARAM(PrincipledDiffuseClosure, params.roughness),
@ -261,7 +251,7 @@ void OSLShader::register_closures(OSLShadingSystem *ss_)
register_closure(ss, "refraction", id++,
bsdf_refraction_params(), bsdf_refraction_prepare);
register_closure(ss, "transparent", id++,
bsdf_transparent_params(), bsdf_transparent_prepare);
closure_bsdf_transparent_params(), closure_bsdf_transparent_prepare);
register_closure(ss, "microfacet_ggx", id++,
bsdf_microfacet_ggx_params(), bsdf_microfacet_ggx_prepare);
register_closure(ss, "microfacet_ggx_aniso", id++,
@ -332,9 +322,9 @@ void OSLShader::register_closures(OSLShadingSystem *ss_)
bsdf_hair_transmission_params(), bsdf_hair_transmission_prepare);
register_closure(ss, "henyey_greenstein", id++,
volume_henyey_greenstein_params(), volume_henyey_greenstein_prepare);
closure_henyey_greenstein_params(), closure_henyey_greenstein_prepare);
register_closure(ss, "absorption", id++,
volume_absorption_params(), volume_absorption_prepare);
closure_absorption_params(), closure_absorption_prepare);
}
/* BSDF Closure */
@ -637,5 +627,76 @@ ClosureParam *closure_bsdf_microfacet_multi_ggx_glass_fresnel_params()
}
CCLOSURE_PREPARE(closure_bsdf_microfacet_multi_ggx_glass_fresnel_prepare, MicrofacetMultiGGXGlassFresnelClosure);
/* Transparent */
class TransparentClosure : public CBSDFClosure {
public:
ShaderClosure params;
float3 unused;
void setup(ShaderData *sd, int path_flag, float3 weight)
{
bsdf_transparent_setup(sd, weight);
}
};
ClosureParam *closure_bsdf_transparent_params()
{
static ClosureParam params[] = {
CLOSURE_STRING_KEYPARAM(TransparentClosure, label, "label"),
CLOSURE_FINISH_PARAM(TransparentClosure)
};
return params;
}
CCLOSURE_PREPARE(closure_bsdf_transparent_prepare, TransparentClosure)
/* Volume */
class VolumeAbsorptionClosure : public CBSDFClosure {
public:
void setup(ShaderData *sd, int path_flag, float3 weight)
{
volume_extinction_setup(sd, weight);
}
};
ClosureParam *closure_absorption_params()
{
static ClosureParam params[] = {
CLOSURE_STRING_KEYPARAM(VolumeAbsorptionClosure, label, "label"),
CLOSURE_FINISH_PARAM(VolumeAbsorptionClosure)
};
return params;
}
CCLOSURE_PREPARE(closure_absorption_prepare, VolumeAbsorptionClosure)
class VolumeHenyeyGreensteinClosure : public CBSDFClosure {
public:
HenyeyGreensteinVolume params;
void setup(ShaderData *sd, int path_flag, float3 weight)
{
volume_extinction_setup(sd, weight);
HenyeyGreensteinVolume *volume = (HenyeyGreensteinVolume*)bsdf_alloc_osl(sd, sizeof(HenyeyGreensteinVolume), weight, &params);
sd->flag |= (volume) ? volume_henyey_greenstein_setup(volume) : 0;
}
};
ClosureParam *closure_henyey_greenstein_params()
{
static ClosureParam params[] = {
CLOSURE_FLOAT_PARAM(VolumeHenyeyGreensteinClosure, params.g),
CLOSURE_STRING_KEYPARAM(VolumeHenyeyGreensteinClosure, label, "label"),
CLOSURE_FINISH_PARAM(VolumeHenyeyGreensteinClosure)
};
return params;
}
CCLOSURE_PREPARE(closure_henyey_greenstein_prepare, VolumeHenyeyGreensteinClosure)
CCL_NAMESPACE_END

@ -48,11 +48,13 @@ OSL::ClosureParam *closure_holdout_params();
OSL::ClosureParam *closure_ambient_occlusion_params();
OSL::ClosureParam *closure_bsdf_diffuse_ramp_params();
OSL::ClosureParam *closure_bsdf_phong_ramp_params();
OSL::ClosureParam *closure_bsdf_transparent_params();
OSL::ClosureParam *closure_bssrdf_cubic_params();
OSL::ClosureParam *closure_bssrdf_gaussian_params();
OSL::ClosureParam *closure_bssrdf_burley_params();
OSL::ClosureParam *closure_bssrdf_principled_params();
OSL::ClosureParam *closure_henyey_greenstein_volume_params();
OSL::ClosureParam *closure_absorption_params();
OSL::ClosureParam *closure_henyey_greenstein_params();
OSL::ClosureParam *closure_bsdf_microfacet_multi_ggx_params();
OSL::ClosureParam *closure_bsdf_microfacet_multi_ggx_glass_params();
OSL::ClosureParam *closure_bsdf_microfacet_multi_ggx_aniso_params();
@ -69,11 +71,13 @@ void closure_holdout_prepare(OSL::RendererServices *, int id, void *data);
void closure_ambient_occlusion_prepare(OSL::RendererServices *, int id, void *data);
void closure_bsdf_diffuse_ramp_prepare(OSL::RendererServices *, int id, void *data);
void closure_bsdf_phong_ramp_prepare(OSL::RendererServices *, int id, void *data);
void closure_bsdf_transparent_prepare(OSL::RendererServices *, int id, void *data);
void closure_bssrdf_cubic_prepare(OSL::RendererServices *, int id, void *data);
void closure_bssrdf_gaussian_prepare(OSL::RendererServices *, int id, void *data);
void closure_bssrdf_burley_prepare(OSL::RendererServices *, int id, void *data);
void closure_bssrdf_principled_prepare(OSL::RendererServices *, int id, void *data);
void closure_henyey_greenstein_volume_prepare(OSL::RendererServices *, int id, void *data);
void closure_absorption_prepare(OSL::RendererServices *, int id, void *data);
void closure_henyey_greenstein_prepare(OSL::RendererServices *, int id, void *data);
void closure_bsdf_microfacet_multi_ggx_prepare(OSL::RendererServices *, int id, void *data);
void closure_bsdf_microfacet_multi_ggx_glass_prepare(OSL::RendererServices *, int id, void *data);
void closure_bsdf_microfacet_multi_ggx_aniso_prepare(OSL::RendererServices *, int id, void *data);
@ -147,36 +151,6 @@ static ClosureParam *bsdf_##lower##_params() \
\
CCLOSURE_PREPARE_STATIC(bsdf_##lower##_prepare, Upper##Closure)
/* Volume */
#define VOLUME_CLOSURE_CLASS_BEGIN(Upper, lower, structname, TYPE) \
\
class Upper##Closure : public CBSDFClosure { \
public: \
structname params; \
\
void setup(ShaderData *sd, int path_flag, float3 weight) \
{ \
structname *volume = (structname*)bsdf_alloc_osl(sd, sizeof(structname), weight, &params); \
sd->flag |= (volume) ? volume_##lower##_setup(volume) : 0; \
} \
}; \
\
static ClosureParam *volume_##lower##_params() \
{ \
static ClosureParam params[] = {
/* parameters */
#define VOLUME_CLOSURE_CLASS_END(Upper, lower) \
CLOSURE_STRING_KEYPARAM(Upper##Closure, label, "label"), \
CLOSURE_FINISH_PARAM(Upper##Closure) \
}; \
return params; \
} \
\
CCLOSURE_PREPARE_STATIC(volume_##lower##_prepare, Upper##Closure)
CCL_NAMESPACE_END
#endif /* __OSL_CLOSURES_H__ */

@ -122,7 +122,12 @@ ccl_device void kernel_buffer_update(KernelGlobals *kg,
*/
*throughput = make_float3(1.0f, 1.0f, 1.0f);
path_radiance_init(L, kernel_data.film.use_light_pass);
path_state_init(kg, &kernel_split_state.sd_DL_shadow[ray_index], state, rng_hash, sample, ray);
path_state_init(kg,
AS_SHADER_DATA(&kernel_split_state.sd_DL_shadow[ray_index]),
state,
rng_hash,
sample,
ray);
#ifdef __SUBSURFACE__
kernel_path_subsurface_init_indirect(&kernel_split_state.ss_rays[ray_index]);
#endif

@ -98,7 +98,16 @@ ccl_device void kernel_direct_lighting(KernelGlobals *kg,
BsdfEval L_light;
bool is_lamp;
if(direct_emission(kg, sd, &kernel_split_state.sd_DL_shadow[ray_index], &ls, state, &light_ray, &L_light, &is_lamp, terminate)) {
if(direct_emission(kg,
sd,
AS_SHADER_DATA(&kernel_split_state.sd_DL_shadow[ray_index]),
&ls,
state,
&light_ray,
&L_light,
&is_lamp,
terminate))
{
/* Write intermediate data to global memory to access from
* the next kernel.
*/

@ -31,7 +31,7 @@ ccl_device_noinline bool kernel_split_branched_path_volume_indirect_light_iter(K
ShaderData *sd = &kernel_split_state.sd[ray_index];
PathRadiance *L = &kernel_split_state.path_radiance[ray_index];
ShaderData *emission_sd = &kernel_split_state.sd_DL_shadow[ray_index];
ShaderData *emission_sd = AS_SHADER_DATA(&kernel_split_state.sd_DL_shadow[ray_index]);
/* GPU: no decoupled ray marching, scatter probabilistically */
int num_samples = kernel_data.integrator.volume_samples;
@ -141,7 +141,7 @@ ccl_device void kernel_do_volume(KernelGlobals *kg)
ccl_global Ray *ray = &kernel_split_state.ray[ray_index];
ccl_global Intersection *isect = &kernel_split_state.isect[ray_index];
ShaderData *sd = &kernel_split_state.sd[ray_index];
ShaderData *emission_sd = &kernel_split_state.sd_DL_shadow[ray_index];
ShaderData *emission_sd = AS_SHADER_DATA(&kernel_split_state.sd_DL_shadow[ray_index]);
bool hit = ! IS_STATE(ray_state, ray_index, RAY_HIT_BACKGROUND);

@ -101,7 +101,7 @@ ccl_device void kernel_holdout_emission_blurring_pathtermination_ao(
ccl_global float *buffer = kernel_split_params.tile.buffer + buffer_offset;
ccl_global Ray *ray = &kernel_split_state.ray[ray_index];
ShaderData *emission_sd = &kernel_split_state.sd_DL_shadow[ray_index];
ShaderData *emission_sd = AS_SHADER_DATA(&kernel_split_state.sd_DL_shadow[ray_index]);
PathRadiance *L = &kernel_split_state.path_radiance[ray_index];
throughput = kernel_split_state.throughput[ray_index];

@ -55,9 +55,9 @@ ccl_device void kernel_indirect_background(KernelGlobals *kg)
PathRadiance *L = &kernel_split_state.path_radiance[ray_index];
ccl_global Ray *ray = &kernel_split_state.ray[ray_index];
float3 throughput = kernel_split_state.throughput[ray_index];
ShaderData *emission_sd = &kernel_split_state.sd_DL_shadow[ray_index];
ShaderData *sd = &kernel_split_state.sd[ray_index];
kernel_path_background(kg, state, ray, throughput, emission_sd, L);
kernel_path_background(kg, state, ray, throughput, sd, L);
kernel_split_path_end(kg, ray_index);
}
}

@ -58,9 +58,9 @@ ccl_device void kernel_lamp_emission(KernelGlobals *kg)
float3 throughput = kernel_split_state.throughput[ray_index];
Ray ray = kernel_split_state.ray[ray_index];
ccl_global Intersection *isect = &kernel_split_state.isect[ray_index];
ShaderData *emission_sd = &kernel_split_state.sd_DL_shadow[ray_index];
ShaderData *sd = &kernel_split_state.sd[ray_index];
kernel_path_lamp_emission(kg, state, &ray, throughput, isect, emission_sd, L);
kernel_path_lamp_emission(kg, state, &ray, throughput, isect, sd, L);
}
}

@ -64,7 +64,7 @@ ccl_device void kernel_path_init(KernelGlobals *kg) {
kernel_split_state.throughput[ray_index] = make_float3(1.0f, 1.0f, 1.0f);
path_radiance_init(&kernel_split_state.path_radiance[ray_index], kernel_data.film.use_light_pass);
path_state_init(kg,
&kernel_split_state.sd_DL_shadow[ray_index],
AS_SHADER_DATA(&kernel_split_state.sd_DL_shadow[ray_index]),
&kernel_split_state.path_state[ray_index],
rng_hash,
sample,

@ -50,7 +50,7 @@ ccl_device void kernel_shader_eval(KernelGlobals *kg)
if(IS_STATE(ray_state, ray_index, RAY_ACTIVE)) {
ccl_global PathState *state = &kernel_split_state.path_state[ray_index];
shader_eval_surface(kg, &kernel_split_state.sd[ray_index], state, state->flag);
shader_eval_surface(kg, &kernel_split_state.sd[ray_index], state, state->flag, MAX_CLOSURE);
#ifdef __BRANCHED_PATH__
if(kernel_data.integrator.branched) {
shader_merge_closures(&kernel_split_state.sd[ray_index]);

@ -34,7 +34,7 @@ ccl_device void kernel_shadow_blocked_ao(KernelGlobals *kg)
}
ShaderData *sd = &kernel_split_state.sd[ray_index];
ShaderData *emission_sd = &kernel_split_state.sd_DL_shadow[ray_index];
ShaderData *emission_sd = AS_SHADER_DATA(&kernel_split_state.sd_DL_shadow[ray_index]);
PathRadiance *L = &kernel_split_state.path_radiance[ray_index];
ccl_global PathState *state = &kernel_split_state.path_state[ray_index];
float3 throughput = kernel_split_state.throughput[ray_index];

@ -47,7 +47,7 @@ ccl_device void kernel_shadow_blocked_dl(KernelGlobals *kg)
float3 throughput = kernel_split_state.throughput[ray_index];
BsdfEval L_light = kernel_split_state.bsdf_eval[ray_index];
ShaderData *emission_sd = &kernel_split_state.sd_DL_shadow[ray_index];
ShaderData *emission_sd = AS_SHADER_DATA(&kernel_split_state.sd_DL_shadow[ray_index]);
bool is_lamp = kernel_split_state.is_lamp[ray_index];
# if defined(__BRANCHED_PATH__) || defined(__SHADOW_TRICKS__)

@ -111,7 +111,7 @@ typedef ccl_global struct SplitBranchedState {
SPLIT_DATA_ENTRY(ccl_global int, queue_data, (NUM_QUEUES*2)) /* TODO(mai): this is too large? */ \
SPLIT_DATA_ENTRY(ccl_global uint, buffer_offset, 1) \
SPLIT_DATA_ENTRY(ShaderData, sd, 1) \
SPLIT_DATA_ENTRY(ShaderData, sd_DL_shadow, 1) \
SPLIT_DATA_ENTRY(ShaderDataTinyStorage, sd_DL_shadow, 1) \
SPLIT_DATA_SUBSURFACE_ENTRIES \
SPLIT_DATA_VOLUME_ENTRIES \
SPLIT_DATA_BRANCHED_ENTRIES \
@ -127,7 +127,7 @@ typedef ccl_global struct SplitBranchedState {
SPLIT_DATA_ENTRY(ccl_global int, is_lamp, 1) \
SPLIT_DATA_ENTRY(ccl_global Ray, light_ray, 1) \
SPLIT_DATA_ENTRY(ShaderData, sd, 1) \
SPLIT_DATA_ENTRY(ShaderData, sd_DL_shadow, 1) \
SPLIT_DATA_ENTRY(ShaderDataTinyStorage, sd_DL_shadow, 1) \
SPLIT_DATA_SUBSURFACE_ENTRIES \
SPLIT_DATA_VOLUME_ENTRIES \
SPLIT_DATA_BRANCHED_ENTRIES \
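The sd_DL_shadow entries above shrink from a full ShaderData to a ShaderDataTinyStorage, and every use of them in the split kernels is now wrapped in AS_SHADER_DATA(...). Neither definition appears in this diff; the standalone sketch below only illustrates the general pattern (backing storage padded to hold a shader data with a single closure, plus a cast macro back to the full view) using mock types, so all names in it are assumptions rather than the actual Cycles declarations.

#include <cassert>

/* Mock stand-ins for the real types; sizes are arbitrary. */
struct MockClosure { float data[16]; };
enum { MOCK_MAX_CLOSURE = 64 };
struct MockShaderData {
	int flag;
	int num_closure;
	MockClosure closure[MOCK_MAX_CLOSURE];
};

/* Storage just large enough for a MockShaderData carrying one closure. */
struct MockTinyStorage {
	char pad[sizeof(MockShaderData) - sizeof(MockClosure) * (MOCK_MAX_CLOSURE - 1)];
};

/* Cast macro in the spirit of AS_SHADER_DATA (assumed shape, not the real one). */
#define AS_MOCK_SHADER_DATA(tiny) ((MockShaderData *)(tiny))

int main()
{
	MockTinyStorage storage;
	MockShaderData *sd = AS_MOCK_SHADER_DATA(&storage);
	sd->num_closure = 1;            /* only the first closure slot is used, */
	sd->closure[0].data[0] = 1.0f;  /* which still lies inside the padded storage */
	assert(sizeof(MockTinyStorage) < sizeof(MockShaderData));
	return 0;
}

Presumably the shadow/direct-lighting shader evaluation does not need the full closure array, so the per-ray split state gets smaller while existing ShaderData code keeps working through the cast.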

@ -39,7 +39,7 @@ ccl_device_noinline bool kernel_split_branched_path_subsurface_indirect_light_it
ShaderData *sd = &branched_state->sd;
PathRadiance *L = &kernel_split_state.path_radiance[ray_index];
ShaderData *emission_sd = &kernel_split_state.sd_DL_shadow[ray_index];
ShaderData *emission_sd = AS_SHADER_DATA(&kernel_split_state.sd_DL_shadow[ray_index]);
for(int i = branched_state->ss_next_closure; i < sd->num_closure; i++) {
ShaderClosure *sc = &sd->closure[i];
@ -229,7 +229,7 @@ ccl_device void kernel_subsurface_scatter(KernelGlobals *kg)
ccl_global float3 *throughput = &kernel_split_state.throughput[ray_index];
ccl_global SubsurfaceIndirectRays *ss_indirect = &kernel_split_state.ss_rays[ray_index];
ShaderData *sd = &kernel_split_state.sd[ray_index];
ShaderData *emission_sd = &kernel_split_state.sd_DL_shadow[ray_index];
ShaderData *emission_sd = AS_SHADER_DATA(&kernel_split_state.sd_DL_shadow[ray_index]);
if(sd->flag & SD_BSSRDF) {

@ -207,7 +207,9 @@ ccl_device_noinline void svm_eval_nodes(KernelGlobals *kg, ShaderData *sd, ccl_a
break;
}
case NODE_CLOSURE_BSDF:
if(type == SHADER_TYPE_SURFACE) {
svm_node_closure_bsdf(kg, sd, stack, node, path_flag, &offset);
}
break;
case NODE_CLOSURE_EMISSION:
svm_node_closure_emission(sd, stack, node);
@ -325,7 +327,9 @@ ccl_device_noinline void svm_eval_nodes(KernelGlobals *kg, ShaderData *sd, ccl_a
break;
# if NODES_FEATURE(NODE_FEATURE_VOLUME)
case NODE_CLOSURE_VOLUME:
if(type == SHADER_TYPE_VOLUME) {
svm_node_closure_volume(kg, sd, stack, node, path_flag);
}
break;
# endif /* NODES_FEATURE(NODE_FEATURE_VOLUME) */
# ifdef __EXTRA_NODES__

@ -446,12 +446,7 @@ ccl_device void svm_node_closure_bsdf(KernelGlobals *kg, ShaderData *sd, float *
}
case CLOSURE_BSDF_TRANSPARENT_ID: {
float3 weight = sd->svm_closure_weight * mix_weight;
ShaderClosure *bsdf = bsdf_alloc(sd, sizeof(ShaderClosure), weight);
if(bsdf) {
bsdf->N = N;
sd->flag |= bsdf_transparent_setup(bsdf);
}
bsdf_transparent_setup(sd, weight);
break;
}
case CLOSURE_BSDF_REFLECTION_ID:
@ -708,18 +703,12 @@ ccl_device void svm_node_closure_bsdf(KernelGlobals *kg, ShaderData *sd, float *
float3 weight = sd->svm_closure_weight * mix_weight;
if(sd->flag & SD_BACKFACING && sd->type & PRIMITIVE_ALL_CURVE) {
ShaderClosure *bsdf = bsdf_alloc(sd, sizeof(ShaderClosure), weight);
if(bsdf) {
bsdf->N = N;
/* todo: giving a fixed weight here will cause issues when
* mixing multiple BSDFS. energy will not be conserved and
* the throughput can blow up after multiple bounces. we
* better figure out a way to skip backfaces from rays
* spawned by transmission from the front */
bsdf->weight = make_float3(1.0f, 1.0f, 1.0f);
sd->flag |= bsdf_transparent_setup(bsdf);
}
bsdf_transparent_setup(sd, make_float3(1.0f, 1.0f, 1.0f));
}
else {
HairBsdf *bsdf = (HairBsdf*)bsdf_alloc(sd, sizeof(HairBsdf), weight);
@ -831,38 +820,37 @@ ccl_device void svm_node_closure_volume(KernelGlobals *kg, ShaderData *sd, float
return;
float param1 = (stack_valid(param1_offset))? stack_load_float(stack, param1_offset): __uint_as_float(node.z);
/* Compute scattering coefficient. */
float density = mix_weight * fmaxf(param1, 0.0f);
float3 weight = sd->svm_closure_weight;
if(type == CLOSURE_VOLUME_ABSORPTION_ID) {
weight = make_float3(1.0f, 1.0f, 1.0f) - weight;
}
weight *= density;
/* Add closure for volume scattering. */
if(type == CLOSURE_VOLUME_HENYEY_GREENSTEIN_ID) {
float param2 = (stack_valid(param2_offset))? stack_load_float(stack, param2_offset): __uint_as_float(node.w);
float density = fmaxf(param1, 0.0f);
switch(type) {
case CLOSURE_VOLUME_ABSORPTION_ID: {
float3 weight = (make_float3(1.0f, 1.0f, 1.0f) - sd->svm_closure_weight) * mix_weight * density;
ShaderClosure *sc = closure_alloc(sd, sizeof(ShaderClosure), CLOSURE_NONE_ID, weight);
if(sc) {
sd->flag |= volume_absorption_setup(sc);
}
break;
}
case CLOSURE_VOLUME_HENYEY_GREENSTEIN_ID: {
float3 weight = sd->svm_closure_weight * mix_weight * density;
HenyeyGreensteinVolume *volume = (HenyeyGreensteinVolume*)bsdf_alloc(sd, sizeof(HenyeyGreensteinVolume), weight);
if(volume) {
volume->g = param2; /* g */
sd->flag |= volume_henyey_greenstein_setup(volume);
}
break;
}
default:
break;
}
/* Sum total extinction weight. */
volume_extinction_setup(sd, weight);
#endif
}
ccl_device void svm_node_closure_emission(ShaderData *sd, float *stack, uint4 node)
{
uint mix_weight_offset = node.y;
float3 weight = sd->svm_closure_weight;
if(stack_valid(mix_weight_offset)) {
float mix_weight = stack_load_float(stack, mix_weight_offset);
@ -870,17 +858,16 @@ ccl_device void svm_node_closure_emission(ShaderData *sd, float *stack, uint4 no
if(mix_weight == 0.0f)
return;
closure_alloc(sd, sizeof(ShaderClosure), CLOSURE_EMISSION_ID, sd->svm_closure_weight * mix_weight);
weight *= mix_weight;
}
else
closure_alloc(sd, sizeof(ShaderClosure), CLOSURE_EMISSION_ID, sd->svm_closure_weight);
sd->flag |= SD_EMISSION;
emission_setup(sd, weight);
}
ccl_device void svm_node_closure_background(ShaderData *sd, float *stack, uint4 node)
{
uint mix_weight_offset = node.y;
float3 weight = sd->svm_closure_weight;
if(stack_valid(mix_weight_offset)) {
float mix_weight = stack_load_float(stack, mix_weight_offset);
@ -888,10 +875,10 @@ ccl_device void svm_node_closure_background(ShaderData *sd, float *stack, uint4
if(mix_weight == 0.0f)
return;
closure_alloc(sd, sizeof(ShaderClosure), CLOSURE_BACKGROUND_ID, sd->svm_closure_weight * mix_weight);
weight *= mix_weight;
}
else
closure_alloc(sd, sizeof(ShaderClosure), CLOSURE_BACKGROUND_ID, sd->svm_closure_weight);
background_setup(sd, weight);
}
ccl_device void svm_node_closure_holdout(ShaderData *sd, float *stack, uint4 node)
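In the svm_node_closure_volume hunk above, a volume closure's weight doubles as its extinction contribution: an absorption closure with colour c and density d contributes (1 - c) * d, a Henyey-Greenstein closure contributes c * d, and the "Sum total extinction weight" step hands that weight to volume_extinction_setup() so the total extinction accumulates across closures. A standalone arithmetic check of that bookkeeping with made-up numbers (plain C++, not Cycles API):

#include <cassert>
#include <cmath>

int main()
{
	const float density = 2.0f;
	const float colour = 0.75f;  /* hypothetical closure colour channel */

	const float sigma_s = colour * density;          /* scatter (Henyey-Greenstein) weight */
	const float sigma_a = (1.0f - colour) * density; /* absorption weight */
	const float sigma_t = sigma_a + sigma_s;         /* what the extinction sum accumulates */

	/* With matching colour and density the two contributions add back up to
	 * the density: the colour only redistributes extinction between
	 * absorption and scattering. */
	assert(std::fabs(sigma_t - density) < 1e-6f);
	return 0;
}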

@ -445,8 +445,6 @@ typedef enum ClosureType {
CLOSURE_BSSRDF_BURLEY_ID,
/* Other */
CLOSURE_EMISSION_ID,
CLOSURE_BACKGROUND_ID,
CLOSURE_HOLDOUT_ID,
CLOSURE_AMBIENT_OCCLUSION_ID,
@ -478,9 +476,7 @@ typedef enum ClosureType {
#define CLOSURE_IS_VOLUME(type) (type >= CLOSURE_VOLUME_ID && type <= CLOSURE_VOLUME_HENYEY_GREENSTEIN_ID)
#define CLOSURE_IS_VOLUME_SCATTER(type) (type == CLOSURE_VOLUME_HENYEY_GREENSTEIN_ID)
#define CLOSURE_IS_VOLUME_ABSORPTION(type) (type == CLOSURE_VOLUME_ABSORPTION_ID)
#define CLOSURE_IS_EMISSION(type) (type == CLOSURE_EMISSION_ID)
#define CLOSURE_IS_HOLDOUT(type) (type == CLOSURE_HOLDOUT_ID)
#define CLOSURE_IS_BACKGROUND(type) (type == CLOSURE_BACKGROUND_ID)
#define CLOSURE_IS_AMBIENT_OCCLUSION(type) (type == CLOSURE_AMBIENT_OCCLUSION_ID)
#define CLOSURE_IS_PHASE(type) (type == CLOSURE_VOLUME_HENYEY_GREENSTEIN_ID)
#define CLOSURE_IS_GLASS(type) (type >= CLOSURE_BSDF_MICROFACET_MULTI_GGX_GLASS_ID && type <= CLOSURE_BSDF_SHARP_GLASS_ID)

@ -201,7 +201,7 @@ bool BakeManager::bake(Device *device, DeviceScene *dscene, Scene *scene, Progre
/* read result */
int k = 0;
float4 *offset = (float4*)d_output.data_pointer;
float4 *offset = d_output.data();
size_t depth = 4;
for(size_t i=shader_offset; i < (shader_offset + shader_size); i++) {

@ -173,8 +173,8 @@ bool RenderBuffers::get_denoising_pass_rect(int offset, float exposure, int samp
/* Approximate variance as E[x^2] - 1/N * (E[x])^2, since online variance
* update does not work efficiently with atomics in the kernel. */
int mean_offset = offset - components;
float *mean = (float*)buffer.data_pointer + mean_offset;
float *var = (float*)buffer.data_pointer + offset;
float *mean = buffer.data() + mean_offset;
float *var = buffer.data() + offset;
assert(mean_offset >= 0);
if(components == 1) {
@ -194,7 +194,7 @@ bool RenderBuffers::get_denoising_pass_rect(int offset, float exposure, int samp
}
}
else {
float *in = (float*)buffer.data_pointer + offset;
float *in = buffer.data() + offset;
if(components == 1) {
for(int i = 0; i < size; i++, in += pass_stride, pixels++) {
@ -228,7 +228,7 @@ bool RenderBuffers::get_pass_rect(PassType type, float exposure, int sample, int
continue;
}
float *in = (float*)buffer.data_pointer + pass_offset;
float *in = buffer.data() + pass_offset;
int pass_stride = params.get_passes_size();
float scale = (pass.filter)? 1.0f/(float)sample: 1.0f;
@ -295,7 +295,7 @@ bool RenderBuffers::get_pass_rect(PassType type, float exposure, int sample, int
pass_offset += color_pass.components;
}
float *in_divide = (float*)buffer.data_pointer + pass_offset;
float *in_divide = buffer.data() + pass_offset;
for(int i = 0; i < size; i++, in += pass_stride, in_divide += pass_stride, pixels += 3) {
float3 f = make_float3(in[0], in[1], in[2]);
@ -344,7 +344,7 @@ bool RenderBuffers::get_pass_rect(PassType type, float exposure, int sample, int
pass_offset += color_pass.components;
}
float *in_weight = (float*)buffer.data_pointer + pass_offset;
float *in_weight = buffer.data() + pass_offset;
for(int i = 0; i < size; i++, in += pass_stride, in_weight += pass_stride, pixels += 4) {
float4 f = make_float4(in[0], in[1], in[2], in[3]);
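The comment on the denoising variance pass above is the usual sum-of-squares trick: the kernel only accumulates sum(x) and sum(x*x) per pixel with atomic adds, and the variance is recovered from those sums afterwards. A standalone sketch of that estimate (the exact scaling of the real passes may differ):

#include <cassert>
#include <cmath>
#include <vector>

/* Recover a (biased) variance estimate from accumulated sums over n samples. */
static float variance_from_sums(float sum, float sum_sq, int n)
{
	const float mean = sum / (float)n;
	const float var = sum_sq / (float)n - mean * mean;
	return var > 0.0f ? var : 0.0f; /* clamp tiny negatives caused by round-off */
}

int main()
{
	const std::vector<float> samples = {0.2f, 0.4f, 0.6f, 0.8f};
	float sum = 0.0f, sum_sq = 0.0f;
	for (float x : samples) { /* what a kernel would accumulate per pixel */
		sum += x;
		sum_sq += x * x;
	}
	const float var = variance_from_sums(sum, sum_sq, (int)samples.size());
	assert(std::fabs(var - 0.05f) < 1e-5f); /* population variance of the samples */
	return 0;
}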

@ -79,7 +79,7 @@ static void shade_background_pixels(Device *device, DeviceScene *dscene, int res
d_input.free();
float4 *d_output_data = reinterpret_cast<float4*>(d_output.data_pointer);
float4 *d_output_data = d_output.data();
pixels.resize(width*height);

@ -436,6 +436,8 @@ Mesh::Mesh()
face_offset = 0;
corner_offset = 0;
attr_map_offset = 0;
num_subd_verts = 0;
attributes.triangle_mesh = this;
@ -1258,33 +1260,27 @@ void MeshManager::update_svm_attributes(Device *, DeviceScene *dscene, Scene *sc
* attribute, based on a unique shader attribute id. */
/* compute array stride */
int attr_map_stride = 0;
int attr_map_size = 0;
for(size_t i = 0; i < scene->meshes.size(); i++)
attr_map_stride = max(attr_map_stride, (mesh_attributes[i].size() + 1)*ATTR_PRIM_TYPES);
for(size_t i = 0; i < scene->meshes.size(); i++) {
Mesh *mesh = scene->meshes[i];
mesh->attr_map_offset = attr_map_size;
attr_map_size += (mesh_attributes[i].size() + 1)*ATTR_PRIM_TYPES;
}
if(attr_map_stride == 0)
if(attr_map_size == 0)
return;
/* create attribute map */
uint4 *attr_map = dscene->attributes_map.alloc(attr_map_stride*scene->objects.size());
uint4 *attr_map = dscene->attributes_map.alloc(attr_map_size*scene->meshes.size());
memset(attr_map, 0, dscene->attributes_map.size()*sizeof(uint));
for(size_t i = 0; i < scene->objects.size(); i++) {
Object *object = scene->objects[i];
Mesh *mesh = object->mesh;
/* find mesh attributes */
size_t j;
for(j = 0; j < scene->meshes.size(); j++)
if(scene->meshes[j] == mesh)
break;
AttributeRequestSet& attributes = mesh_attributes[j];
for(size_t i = 0; i < scene->meshes.size(); i++) {
Mesh *mesh = scene->meshes[i];
AttributeRequestSet& attributes = mesh_attributes[i];
/* set object attributes */
int index = i*attr_map_stride;
int index = mesh->attr_map_offset;
foreach(AttributeRequest& req, attributes.requests) {
uint id;
@ -1358,7 +1354,6 @@ void MeshManager::update_svm_attributes(Device *, DeviceScene *dscene, Scene *sc
}
/* copy to device */
dscene->data.bvh.attributes_map_stride = attr_map_stride;
dscene->attributes_map.copy_to_device();
}
@ -1625,6 +1620,12 @@ void MeshManager::device_update_attributes(Device *device, DeviceScene *dscene,
if(dscene->attributes_uchar4.size()) {
dscene->attributes_uchar4.copy_to_device();
}
if(progress.get_cancel()) return;
/* After mesh attributes and patch tables have been copied to device memory,
* we need to update offsets in the objects. */
scene->object_manager->device_update_mesh_offsets(device, dscene, scene);
}
void MeshManager::mesh_calc_offset(Scene *scene)
@ -2042,10 +2043,6 @@ void MeshManager::device_update(Device *device, DeviceScene *dscene, Scene *scen
}
if(progress.get_cancel()) return;
/* after mesh data has been copied to device memory we need to update
* offsets for patch tables as this can't be known before hand */
scene->object_manager->device_update_patch_map_offsets(device, dscene, scene);
device_update_attributes(device, dscene, scene, progress);
if(progress.get_cancel()) return;
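The attributes_map change above replaces one worst-case stride shared by all objects (index = object_index * attr_map_stride, with the stride taken from the largest mesh) with a per-mesh attr_map_offset accumulated while counting entries, which is also why device_update_mesh_offsets() now has to write those offsets into the object records after the copy. A standalone sketch of the two indexing schemes with made-up entry counts (it ignores that the old table was additionally sized per object rather than per mesh):

#include <algorithm>
#include <cstdio>
#include <vector>

int main()
{
	/* Hypothetical attribute-map entry counts for four meshes. */
	const std::vector<int> entries = {4, 1, 9, 2};

	/* Old scheme: one stride sized for the largest mesh. */
	int stride = 0;
	for (int n : entries)
		stride = std::max(stride, n);
	std::printf("stride-based size: %zu entries\n", entries.size() * (size_t)stride);

	/* New scheme: each mesh records where its packed entries start. */
	int offset = 0;
	for (int n : entries) {
		std::printf("mesh offset: %d\n", offset);
		offset += n;
	}
	std::printf("packed size: %d entries\n", offset);
	return 0;
}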

@ -250,6 +250,8 @@ public:
size_t face_offset;
size_t corner_offset;
size_t attr_map_offset;
size_t num_subd_verts;
/* Functions */

@ -149,7 +149,7 @@ bool MeshManager::displace(Device *device, DeviceScene *dscene, Scene *scene, Me
done.resize(num_verts, false);
int k = 0;
float4 *offset = (float4*)d_output.data_pointer;
float4 *offset = d_output.data();
Attribute *attr_mP = mesh->attributes.find(ATTR_STD_MOTION_VERTEX_POSITION);
for(size_t i = 0; i < num_triangles; i++) {

@ -476,7 +476,6 @@ class EmissionNode : public ShaderNode {
public:
SHADER_NODE_CLASS(EmissionNode)
void constant_fold(const ConstantFolder& folder);
virtual ClosureType get_closure_type() { return CLOSURE_EMISSION_ID; }
bool has_surface_emission() { return true; }
bool has_volume_support() { return true; }
@ -490,7 +489,6 @@ class BackgroundNode : public ShaderNode {
public:
SHADER_NODE_CLASS(BackgroundNode)
void constant_fold(const ConstantFolder& folder);
virtual ClosureType get_closure_type() { return CLOSURE_BACKGROUND_ID; }
float3 color;
float strength;

@ -589,7 +589,7 @@ void ObjectManager::device_update_flags(Device *,
return;
/* object info flag */
uint *object_flag = dscene->object_flag.get_data();
uint *object_flag = dscene->object_flag.data();
vector<Object *> volume_objects;
bool has_volume_objects = false;
@ -641,21 +641,20 @@ void ObjectManager::device_update_flags(Device *,
dscene->object_flag.copy_to_device();
}
void ObjectManager::device_update_patch_map_offsets(Device *, DeviceScene *dscene, Scene *scene)
void ObjectManager::device_update_mesh_offsets(Device *, DeviceScene *dscene, Scene *scene)
{
if(scene->objects.size() == 0) {
return;
}
uint4* objects = (uint4*)dscene->objects.get_data();
uint4* objects = (uint4*)dscene->objects.data();
bool update = false;
int object_index = 0;
foreach(Object *object, scene->objects) {
int offset = object_index*OBJECT_SIZE + 11;
foreach(Object *object, scene->objects) {
Mesh* mesh = object->mesh;
int offset = object_index*OBJECT_SIZE + 11;
if(mesh->patch_table) {
uint patch_map_offset = 2*(mesh->patch_table_offset + mesh->patch_table->total_size() -
@ -667,6 +666,11 @@ void ObjectManager::device_update_patch_map_offsets(Device *, DeviceScene *dscen
}
}
if(objects[offset].y != mesh->attr_map_offset) {
objects[offset].y = mesh->attr_map_offset;
update = true;
}
object_index++;
}

@ -104,7 +104,7 @@ public:
Scene *scene,
Progress& progress,
bool bounds_valid = true);
void device_update_patch_map_offsets(Device *device, DeviceScene *dscene, Scene *scene);
void device_update_mesh_offsets(Device *device, DeviceScene *dscene, Scene *scene);
void device_free(Device *device, DeviceScene *dscene);

@ -87,7 +87,7 @@ size_t LookupTables::add_table(DeviceScene *dscene, vector<float>& data)
}
/* copy table data and return offset */
float *dtable = dscene->lookup_table.get_data();
float *dtable = dscene->lookup_table.data();
memcpy(dtable + new_table.offset, &data[0], sizeof(float) * data.size());
return new_table.offset;

@ -706,7 +706,7 @@ void BKE_pose_eval_flush(const struct EvaluationContext *UNUSED(eval_ctx),
void BKE_pose_eval_proxy_copy(const struct EvaluationContext *UNUSED(eval_ctx), Object *ob)
{
BLI_assert(ID_IS_LINKED_DATABLOCK(ob) && ob->proxy_from != NULL);
BLI_assert(ID_IS_LINKED(ob) && ob->proxy_from != NULL);
DEBUG_PRINT("%s on %s\n", __func__, ob->id.name);
if (BKE_pose_copy_result(ob->pose, ob->proxy_from->pose) == false) {
printf("Proxy copy error, lib Object: %s proxy Object: %s\n",

@ -425,7 +425,7 @@ void BKE_bpath_traverse_id(Main *bmain, ID *id, BPathVisitor visit_cb, const int
{
const char *absbase = (flag & BKE_BPATH_TRAVERSE_ABS) ? ID_BLEND_PATH(bmain, id) : NULL;
if ((flag & BKE_BPATH_TRAVERSE_SKIP_LIBRARY) && ID_IS_LINKED_DATABLOCK(id)) {
if ((flag & BKE_BPATH_TRAVERSE_SKIP_LIBRARY) && ID_IS_LINKED(id)) {
return;
}

@ -229,7 +229,7 @@ void BKE_brush_make_local(Main *bmain, Brush *brush, const bool lib_local)
* - mixed: make copy
*/
if (!ID_IS_LINKED_DATABLOCK(brush)) {
if (!ID_IS_LINKED(brush)) {
return;
}

@ -4724,7 +4724,7 @@ void BKE_constraints_id_loop(ListBase *conlist, ConstraintIDFunc func, void *use
/* helper for BKE_constraints_copy(), to be used for making sure that ID's are valid */
static void con_extern_cb(bConstraint *UNUSED(con), ID **idpoin, bool UNUSED(is_reference), void *UNUSED(userData))
{
if (*idpoin && ID_IS_LINKED_DATABLOCK(*idpoin))
if (*idpoin && ID_IS_LINKED(*idpoin))
id_lib_extern(*idpoin);
}

@ -164,7 +164,7 @@ void BKE_id_lib_local_paths(Main *bmain, Library *lib, ID *id)
void id_lib_extern(ID *id)
{
if (id && ID_IS_LINKED_DATABLOCK(id)) {
if (id && ID_IS_LINKED(id)) {
BLI_assert(BKE_idcode_is_linkable(GS(id->name)));
if (id->tag & LIB_TAG_INDIRECT) {
id->tag -= LIB_TAG_INDIRECT;
@ -311,7 +311,7 @@ void BKE_id_expand_local(Main *bmain, ID *id)
*/
void BKE_id_copy_ensure_local(Main *bmain, const ID *old_id, ID *new_id)
{
if (ID_IS_LINKED_DATABLOCK(old_id)) {
if (ID_IS_LINKED(old_id)) {
BKE_id_expand_local(bmain, new_id);
BKE_id_lib_local_paths(bmain, old_id->lib, new_id);
}
@ -330,7 +330,7 @@ void BKE_id_make_local_generic(Main *bmain, ID *id, const bool id_in_mainlist, c
* In case we make a whole lib's content local, we always want to localize, and we skip remapping (done later).
*/
if (!ID_IS_LINKED_DATABLOCK(id)) {
if (!ID_IS_LINKED(id)) {
return;
}
@ -972,7 +972,7 @@ void BKE_main_lib_objects_recalc_all(Main *bmain)
/* flag for full recalc */
for (ob = bmain->object.first; ob; ob = ob->id.next) {
if (ID_IS_LINKED_DATABLOCK(ob)) {
if (ID_IS_LINKED(ob)) {
DEG_id_tag_update(&ob->id, OB_RECALC_OB | OB_RECALC_DATA | OB_RECALC_TIME);
}
}
@ -1656,7 +1656,7 @@ static ID *is_dupid(ListBase *lb, ID *id, const char *name)
for (idtest = lb->first; idtest; idtest = idtest->next) {
/* if idtest is not a lib */
if (id != idtest && !ID_IS_LINKED_DATABLOCK(idtest)) {
if (id != idtest && !ID_IS_LINKED(idtest)) {
/* do not test alphabetic! */
/* optimized */
if (idtest->name[2] == name[0]) {
@ -1721,7 +1721,7 @@ static bool check_for_dupid(ListBase *lb, ID *id, char *name)
for (idtest = lb->first; idtest; idtest = idtest->next) {
int nrtest;
if ( (id != idtest) &&
!ID_IS_LINKED_DATABLOCK(idtest) &&
!ID_IS_LINKED(idtest) &&
(*name == *(idtest->name + 2)) &&
STREQLEN(name, idtest->name + 2, left_len) &&
(BLI_split_name_num(leftest, &nrtest, idtest->name + 2, '.') == left_len)
@ -1803,7 +1803,7 @@ bool new_id(ListBase *lb, ID *id, const char *tname)
char name[MAX_ID_NAME - 2];
/* if library, don't rename */
if (ID_IS_LINKED_DATABLOCK(id))
if (ID_IS_LINKED(id))
return false;
/* if no name given, use name of current ID

@ -375,7 +375,7 @@ void BKE_library_foreach_ID_link(Main *bmain, ID *id, LibraryIDLinkCallback call
for (; id != NULL; id = (flag & IDWALK_RECURSE) ? BLI_LINKSTACK_POP(data.ids_todo) : NULL) {
data.self_id = id;
data.cb_flag = ID_IS_LINKED_DATABLOCK(id) ? IDWALK_CB_INDIRECT_USAGE : 0;
data.cb_flag = ID_IS_LINKED(id) ? IDWALK_CB_INDIRECT_USAGE : 0;
if (bmain != NULL && bmain->relations != NULL && (flag & IDWALK_READONLY)) {
/* Note that this is minor optimization, even in worst cases (like id being an object with lots of
@ -563,7 +563,7 @@ void BKE_library_foreach_ID_link(Main *bmain, ID *id, LibraryIDLinkCallback call
* Since this field is set/owned by 'user' of this ID (and not ID itself), it is only indirect usage
* if proxy object is linked... Twisted. */
if (object->proxy_from) {
data.cb_flag = ID_IS_LINKED_DATABLOCK(object->proxy_from) ? IDWALK_CB_INDIRECT_USAGE : 0;
data.cb_flag = ID_IS_LINKED(object->proxy_from) ? IDWALK_CB_INDIRECT_USAGE : 0;
}
CALLBACK_INVOKE(object->proxy_from, IDWALK_CB_LOOPBACK);
data.cb_flag = data_cb_flag;

@ -760,7 +760,7 @@ static int id_relink_to_newid_looper(void *UNUSED(user_data), ID *UNUSED(self_id
*/
void BKE_libblock_relink_to_newid(ID *id)
{
if (ID_IS_LINKED_DATABLOCK(id))
if (ID_IS_LINKED(id))
return;
BKE_library_foreach_ID_link(NULL, id, id_relink_to_newid_looper, NULL, 0);

@ -725,8 +725,8 @@ void assign_material(Object *ob, Material *ma, short act, int assign_type)
if (act < 1) act = 1;
/* prevent crashing when using accidentally */
BLI_assert(!ID_IS_LINKED_DATABLOCK(ob));
if (ID_IS_LINKED_DATABLOCK(ob)) return;
BLI_assert(!ID_IS_LINKED(ob));
if (ID_IS_LINKED(ob)) return;
/* test arraylens */
@ -999,7 +999,7 @@ static void do_init_render_material(Material *ma, int r_mode, float *amb)
Group *group;
for (group = G.main->group.first; group; group = group->id.next) {
if (!ID_IS_LINKED_DATABLOCK(group) && STREQ(group->id.name, ma->group->id.name)) {
if (!ID_IS_LINKED(group) && STREQ(group->id.name, ma->group->id.name)) {
ma->group = group;
}
}

@ -733,7 +733,7 @@ void test_object_modifiers(Object *ob)
*/
const char *modifier_path_relbase(Object *ob)
{
if (G.relbase_valid || ID_IS_LINKED_DATABLOCK(ob)) {
if (G.relbase_valid || ID_IS_LINKED(ob)) {
return ID_BLEND_PATH(G.main, &ob->id);
}
else {

@ -2630,7 +2630,7 @@ void BKE_node_clipboard_add_node(bNode *node)
node_info->id = node->id;
if (node->id) {
BLI_strncpy(node_info->id_name, node->id->name, sizeof(node_info->id_name));
if (ID_IS_LINKED_DATABLOCK(node->id)) {
if (ID_IS_LINKED(node->id)) {
BLI_strncpy(node_info->library_name, node->id->lib->filepath, sizeof(node_info->library_name));
}
else {

@ -1288,7 +1288,7 @@ void BKE_object_make_local_ex(Main *bmain, Object *ob, const bool lib_local, con
* In case we make a whole lib's content local, we always want to localize, and we skip remapping (done later).
*/
if (!ID_IS_LINKED_DATABLOCK(ob)) {
if (!ID_IS_LINKED(ob)) {
return;
}
@ -1330,15 +1330,15 @@ void BKE_object_make_local(Main *bmain, Object *ob, const bool lib_local)
/* Returns true if the Object is from an external blend file (libdata) */
bool BKE_object_is_libdata(Object *ob)
{
return (ob && ID_IS_LINKED_DATABLOCK(ob));
return (ob && ID_IS_LINKED(ob));
}
/* Returns true if the Object data is from an external blend file (libdata) */
bool BKE_object_obdata_is_libdata(Object *ob)
{
/* Linked objects with local obdata are forbidden! */
BLI_assert(!ob || !ob->data || (ID_IS_LINKED_DATABLOCK(ob) ? ID_IS_LINKED_DATABLOCK(ob->data) : true));
return (ob && ob->data && ID_IS_LINKED_DATABLOCK(ob->data));
BLI_assert(!ob || !ob->data || (ID_IS_LINKED(ob) ? ID_IS_LINKED(ob->data) : true));
return (ob && ob->data && ID_IS_LINKED(ob->data));
}
/* *************** PROXY **************** */
@ -1385,7 +1385,7 @@ void BKE_object_copy_proxy_drivers(Object *ob, Object *target)
/* only on local objects because this causes indirect links
* 'a -> b -> c', blend to point directly to a.blend
* when a.blend has a proxy that's linked into c.blend */
if (!ID_IS_LINKED_DATABLOCK(ob))
if (!ID_IS_LINKED(ob))
id_lib_extern((ID *)dtar->id);
}
}
@ -1403,7 +1403,7 @@ void BKE_object_copy_proxy_drivers(Object *ob, Object *target)
void BKE_object_make_proxy(Object *ob, Object *target, Object *gob)
{
/* paranoia checks */
if (ID_IS_LINKED_DATABLOCK(ob) || !ID_IS_LINKED_DATABLOCK(target)) {
if (ID_IS_LINKED(ob) || !ID_IS_LINKED(target)) {
printf("cannot make proxy\n");
return;
}
@ -2715,7 +2715,7 @@ void BKE_object_handle_update_ex(const EvaluationContext *eval_ctx,
printf("recalcob %s\n", ob->id.name + 2);
/* handle proxy copy for target */
if (ID_IS_LINKED_DATABLOCK(ob) && ob->proxy_from) {
if (ID_IS_LINKED(ob) && ob->proxy_from) {
// printf("ob proxy copy, lib ob %s proxy %s\n", ob->id.name, ob->proxy_from->id.name);
if (ob->proxy_from->proxy_group) { /* transform proxy into group space */
Object *obg = ob->proxy_from->proxy_group;

@ -190,7 +190,7 @@ void BKE_object_handle_data_update(
break;
}
case OB_ARMATURE:
if (ID_IS_LINKED_DATABLOCK(ob) && ob->proxy_from) {
if (ID_IS_LINKED(ob) && ob->proxy_from) {
if (BKE_pose_copy_result(ob->pose, ob->proxy_from->pose) == false) {
printf("Proxy copy error, lib Object: %s proxy Object: %s\n",
ob->id.name + 2, ob->proxy_from->id.name + 2);
@ -280,7 +280,7 @@ void BKE_object_eval_uber_transform(const EvaluationContext *UNUSED(eval_ctx),
// XXX: it's almost redundant now...
/* Handle proxy copy for target, */
if (ID_IS_LINKED_DATABLOCK(ob) && ob->proxy_from) {
if (ID_IS_LINKED(ob) && ob->proxy_from) {
if (ob->proxy_from->proxy_group) {
/* Transform proxy into group space. */
Object *obg = ob->proxy_from->proxy_group;

@ -232,7 +232,7 @@ void packAll(Main *bmain, ReportList *reports, bool verbose)
int tot = 0;
for (ima = bmain->image.first; ima; ima = ima->id.next) {
if (BKE_image_has_packedfile(ima) == false && !ID_IS_LINKED_DATABLOCK(ima)) {
if (BKE_image_has_packedfile(ima) == false && !ID_IS_LINKED(ima)) {
if (ima->source == IMA_SRC_FILE) {
BKE_image_packfiles(reports, ima, ID_BLEND_PATH(bmain, &ima->id));
tot ++;
@ -245,14 +245,14 @@ void packAll(Main *bmain, ReportList *reports, bool verbose)
}
for (vfont = bmain->vfont.first; vfont; vfont = vfont->id.next) {
if (vfont->packedfile == NULL && !ID_IS_LINKED_DATABLOCK(vfont) && BKE_vfont_is_builtin(vfont) == false) {
if (vfont->packedfile == NULL && !ID_IS_LINKED(vfont) && BKE_vfont_is_builtin(vfont) == false) {
vfont->packedfile = newPackedFile(reports, vfont->name, bmain->name);
tot ++;
}
}
for (sound = bmain->sound.first; sound; sound = sound->id.next) {
if (sound->packedfile == NULL && !ID_IS_LINKED_DATABLOCK(sound)) {
if (sound->packedfile == NULL && !ID_IS_LINKED(sound)) {
sound->packedfile = newPackedFile(reports, sound->name, bmain->name);
tot++;
}

@ -2807,7 +2807,7 @@ static void lib_link_workspaces(FileData *fd, Main *bmain)
if (screen) {
BKE_workspace_layout_screen_set(layout, screen);
if (ID_IS_LINKED_DATABLOCK(id)) {
if (ID_IS_LINKED(id)) {
screen->winid = 0;
if (screen->temp) {
/* delete temp layouts when appending */
@ -2835,7 +2835,7 @@ static void direct_link_workspace(FileData *fd, WorkSpace *workspace, const Main
relation->value = newdataadr(fd, relation->value);
}
if (ID_IS_LINKED_DATABLOCK(&workspace->id)) {
if (ID_IS_LINKED(&workspace->id)) {
/* Appending workspace so render layer is likely from a different scene. Unset
* now, when activating workspace later we set a valid one from current scene. */
BKE_workspace_render_layer_set(workspace, NULL);

@ -450,7 +450,7 @@ void DepsgraphNodeBuilder::build_object(Scene *scene, Object *ob)
break;
case OB_ARMATURE: /* Pose */
if (ID_IS_LINKED_DATABLOCK(ob) && ob->proxy_from != NULL) {
if (ID_IS_LINKED(ob) && ob->proxy_from != NULL) {
build_proxy_rig(ob);
}
else {

@ -517,7 +517,7 @@ void DepsgraphRelationBuilder::build_object(Main *bmain, Scene *scene, Object *o
}
case OB_ARMATURE: /* Pose */
if (ID_IS_LINKED_DATABLOCK(ob) && ob->proxy_from != NULL) {
if (ID_IS_LINKED(ob) && ob->proxy_from != NULL) {
build_proxy_rig(ob);
}
else {

@ -1630,7 +1630,7 @@ static void DRW_shgroup_relationship_lines(OBJECT_StorageList *stl, Object *ob)
static void DRW_shgroup_object_center(OBJECT_StorageList *stl, Object *ob, SceneLayer *sl, View3D *v3d)
{
const bool is_library = ob->id.us > 1 || ID_IS_LINKED_DATABLOCK(ob);
const bool is_library = ob->id.us > 1 || ID_IS_LINKED(ob);
DRWShadingGroup *shgroup;
if (ob == OBACT_NEW(sl)) {

@ -1324,7 +1324,7 @@ static size_t animfilter_action(bAnimContext *ac, ListBase *anim_data, bDopeShee
/* don't include anything from this action if it is linked in from another file,
* and we're getting stuff for editing...
*/
if ((filter_mode & ANIMFILTER_FOREDIT) && ID_IS_LINKED_DATABLOCK(act))
if ((filter_mode & ANIMFILTER_FOREDIT) && ID_IS_LINKED(act))
return 0;
/* do groups */

@ -705,7 +705,7 @@ void draw_fcurve_channel(View2D *v2d, AnimData *adt, FCurve *fcu, float ypos, fl
bool locked = (fcu->flag & FCURVE_PROTECTED) ||
((fcu->grp) && (fcu->grp->flag & AGRP_PROTECTED)) ||
((adt && adt->action) && ID_IS_LINKED_DATABLOCK(adt->action));
((adt && adt->action) && ID_IS_LINKED(adt->action));
BLI_dlrbTree_init(&keys);
BLI_dlrbTree_init(&blocks);
@ -726,7 +726,7 @@ void draw_agroup_channel(View2D *v2d, AnimData *adt, bActionGroup *agrp, float y
DLRBT_Tree keys, blocks;
bool locked = (agrp->flag & AGRP_PROTECTED) ||
((adt && adt->action) && ID_IS_LINKED_DATABLOCK(adt->action));
((adt && adt->action) && ID_IS_LINKED(adt->action));
BLI_dlrbTree_init(&keys);
BLI_dlrbTree_init(&blocks);
@ -746,7 +746,7 @@ void draw_action_channel(View2D *v2d, AnimData *adt, bAction *act, float ypos, f
{
DLRBT_Tree keys, blocks;
bool locked = (act && ID_IS_LINKED_DATABLOCK(act));
bool locked = (act && ID_IS_LINKED(act));
BLI_dlrbTree_init(&keys);
BLI_dlrbTree_init(&blocks);

@ -87,7 +87,7 @@ void ED_armature_enter_posemode(bContext *C, Base *base)
ReportList *reports = CTX_wm_reports(C);
Object *ob = base->object;
if (ID_IS_LINKED_DATABLOCK(ob)) {
if (ID_IS_LINKED(ob)) {
BKE_report(reports, RPT_WARNING, "Cannot pose libdata");
return;
}

@ -185,7 +185,7 @@ static int has_poselib_pose_data_poll(bContext *C)
static int has_poselib_pose_data_for_editing_poll(bContext *C)
{
Object *ob = get_poselib_object(C);
return (ob && ob->poselib && !ID_IS_LINKED_DATABLOCK(ob->poselib));
return (ob && ob->poselib && !ID_IS_LINKED(ob->poselib));
}
/* ----------------------------------- */
@ -387,7 +387,7 @@ static int poselib_add_poll(bContext *C)
if (ED_operator_posemode(C)) {
Object *ob = get_poselib_object(C);
if (ob) {
if ((ob->poselib == NULL) || !ID_IS_LINKED_DATABLOCK(ob->poselib)) {
if ((ob->poselib == NULL) || !ID_IS_LINKED(ob->poselib)) {
return true;
}
}

@ -841,7 +841,7 @@ static int depthdropper_init(bContext *C, wmOperator *op)
RegionView3D *rv3d = CTX_wm_region_view3d(C);
if (rv3d && rv3d->persp == RV3D_CAMOB) {
View3D *v3d = CTX_wm_view3d(C);
if (v3d->camera && v3d->camera->data && !ID_IS_LINKED_DATABLOCK(v3d->camera->data)) {
if (v3d->camera && v3d->camera->data && !ID_IS_LINKED(v3d->camera->data)) {
RNA_id_pointer_create(v3d->camera->data, &ddr->ptr);
ddr->prop = RNA_struct_find_property(&ddr->ptr, "dof_distance");
}
@ -1095,7 +1095,7 @@ static int depthdropper_poll(bContext *C)
RegionView3D *rv3d = CTX_wm_region_view3d(C);
if (rv3d && rv3d->persp == RV3D_CAMOB) {
View3D *v3d = CTX_wm_view3d(C);
if (v3d->camera && v3d->camera->data && !ID_IS_LINKED_DATABLOCK(v3d->camera->data)) {
if (v3d->camera && v3d->camera->data && !ID_IS_LINKED(v3d->camera->data)) {
return 1;
}
}

@ -563,7 +563,7 @@ bool UI_context_copy_to_selected_list(
if ((id_data == NULL) ||
(id_data->tag & LIB_TAG_DOIT) == 0 ||
ID_IS_LINKED_DATABLOCK(id_data) ||
ID_IS_LINKED(id_data) ||
(GS(id_data->name) != id_code))
{
BLI_remlink(&lb, link);

@ -492,7 +492,7 @@ static uiTooltipData *ui_tooltip_data_from_button(bContext *C, uiBut *but)
if (but->rnapoin.id.data) {
const ID *id = but->rnapoin.id.data;
if (ID_IS_LINKED_DATABLOCK(id)) {
if (ID_IS_LINKED(id)) {
uiTooltipField *field = text_field_add(
data, &(uiTooltipFormat){
.style = UI_TIP_STYLE_NORMAL,

@ -1245,7 +1245,7 @@ static uiLayout *draw_modifier(
}
UI_block_lock_clear(block);
UI_block_lock_set(block, ob && ID_IS_LINKED_DATABLOCK(ob), ERROR_LIBDATA_MESSAGE);
UI_block_lock_set(block, ob && ID_IS_LINKED(ob), ERROR_LIBDATA_MESSAGE);
if (!ELEM(md->type, eModifierType_Fluidsim, eModifierType_Softbody, eModifierType_ParticleSystem,
eModifierType_Cloth, eModifierType_Smoke))
@ -1292,7 +1292,7 @@ uiLayout *uiTemplateModifier(uiLayout *layout, bContext *C, PointerRNA *ptr)
return NULL;
}
UI_block_lock_set(uiLayoutGetBlock(layout), (ob && ID_IS_LINKED_DATABLOCK(ob)), ERROR_LIBDATA_MESSAGE);
UI_block_lock_set(uiLayoutGetBlock(layout), (ob && ID_IS_LINKED(ob)), ERROR_LIBDATA_MESSAGE);
/* find modifier and draw it */
cageIndex = modifiers_getCageIndex(scene, ob, &lastCageIndex, 0);
@ -1520,7 +1520,7 @@ uiLayout *uiTemplateConstraint(uiLayout *layout, PointerRNA *ptr)
return NULL;
}
UI_block_lock_set(uiLayoutGetBlock(layout), (ob && ID_IS_LINKED_DATABLOCK(ob)), ERROR_LIBDATA_MESSAGE);
UI_block_lock_set(uiLayoutGetBlock(layout), (ob && ID_IS_LINKED(ob)), ERROR_LIBDATA_MESSAGE);
/* hrms, the temporal constraint should not draw! */
if (con->type == CONSTRAINT_TYPE_KINEMATIC) {
@ -1882,7 +1882,7 @@ void uiTemplateColorRamp(uiLayout *layout, PointerRNA *ptr, const char *propname
block = uiLayoutAbsoluteBlock(layout);
id = cptr.id.data;
UI_block_lock_set(block, (id && ID_IS_LINKED_DATABLOCK(id)), ERROR_LIBDATA_MESSAGE);
UI_block_lock_set(block, (id && ID_IS_LINKED(id)), ERROR_LIBDATA_MESSAGE);
colorband_buttons_layout(layout, block, cptr.data, &rect, cb, expand);
@ -2542,7 +2542,7 @@ void uiTemplateCurveMapping(
cb->prop = prop;
id = cptr.id.data;
UI_block_lock_set(block, (id && ID_IS_LINKED_DATABLOCK(id)), ERROR_LIBDATA_MESSAGE);
UI_block_lock_set(block, (id && ID_IS_LINKED(id)), ERROR_LIBDATA_MESSAGE);
curvemap_buttons_layout(layout, &cptr, type, levels, brush, neg_slope, cb);

@ -3333,7 +3333,7 @@ static int edbm_separate_exec(bContext *C, wmOperator *op)
Object *ob = base_iter->object;
if (ob->type == OB_MESH) {
Mesh *me = ob->data;
if (!ID_IS_LINKED_DATABLOCK(me)) {
if (!ID_IS_LINKED(me)) {
BMesh *bm_old = NULL;
int retval_iter = 0;

@ -501,7 +501,7 @@ static int layers_poll(bContext *C)
{
Object *ob = ED_object_context(C);
ID *data = (ob) ? ob->data : NULL;
return (ob && !ID_IS_LINKED_DATABLOCK(ob) && ob->type == OB_MESH && data && !ID_IS_LINKED_DATABLOCK(data));
return (ob && !ID_IS_LINKED(ob) && ob->type == OB_MESH && data && !ID_IS_LINKED(data));
}
static int mesh_uv_texture_add_exec(bContext *C, wmOperator *UNUSED(op))
@ -749,7 +749,7 @@ static int mesh_customdata_mask_clear_poll(bContext *C)
return false;
}
if (!ID_IS_LINKED_DATABLOCK(me)) {
if (!ID_IS_LINKED(me)) {
CustomData *data = GET_CD_DATA(me, vdata);
if (CustomData_has_layer(data, CD_PAINT_MASK)) {
return true;
@ -803,7 +803,7 @@ static int mesh_customdata_skin_state(bContext *C)
if (ob && ob->type == OB_MESH) {
Mesh *me = ob->data;
if (!ID_IS_LINKED_DATABLOCK(me)) {
if (!ID_IS_LINKED(me)) {
CustomData *data = GET_CD_DATA(me, vdata);
return CustomData_has_layer(data, CD_MVERT_SKIN);
}

@ -1278,7 +1278,7 @@ static int object_delete_exec(bContext *C, wmOperator *op)
if (use_global) {
Scene *scene_iter;
for (scene_iter = bmain->scene.first; scene_iter; scene_iter = scene_iter->id.next) {
if (scene_iter != scene && !ID_IS_LINKED_DATABLOCK(scene_iter)) {
if (scene_iter != scene && !ID_IS_LINKED(scene_iter)) {
if (is_indirectly_used && ID_REAL_USERS(ob) <= 1 && ID_EXTRA_USERS(ob) == 0) {
BKE_reportf(op->reports, RPT_WARNING,
"Cannot delete object '%s' from scene '%s', indirectly used objects need at least one user",
@ -1668,8 +1668,8 @@ static int convert_poll(bContext *C)
Object *obact = CTX_data_active_object(C);
Scene *scene = CTX_data_scene(C);
return (!ID_IS_LINKED_DATABLOCK(scene) && obact && scene->obedit != obact &&
(obact->flag & SELECT) && !ID_IS_LINKED_DATABLOCK(obact));
return (!ID_IS_LINKED(scene) && obact && scene->obedit != obact &&
(obact->flag & SELECT) && !ID_IS_LINKED(obact));
}
/* Helper for convert_exec */
@ -1753,7 +1753,7 @@ static int convert_exec(bContext *C, wmOperator *op)
* However, changing this is more design than bugfix, not to mention convoluted code below,
* so that will be for later.
* But at the very least, do not do that with linked IDs! */
if ((ID_IS_LINKED_DATABLOCK(ob) || (ob->data && ID_IS_LINKED_DATABLOCK(ob->data))) && !keep_original) {
if ((ID_IS_LINKED(ob) || (ob->data && ID_IS_LINKED(ob->data))) && !keep_original) {
keep_original = true;
BKE_reportf(op->reports, RPT_INFO,
"Converting some linked object/object data, enforcing 'Keep Original' option to True");
@ -2500,7 +2500,7 @@ static int join_poll(bContext *C)
{
Object *ob = CTX_data_active_object(C);
if (!ob || ID_IS_LINKED_DATABLOCK(ob)) return 0;
if (!ob || ID_IS_LINKED(ob)) return 0;
if (ELEM(ob->type, OB_MESH, OB_CURVE, OB_SURF, OB_ARMATURE))
return ED_operator_screenactive(C);
@ -2553,7 +2553,7 @@ static int join_shapes_poll(bContext *C)
{
Object *ob = CTX_data_active_object(C);
if (!ob || ID_IS_LINKED_DATABLOCK(ob)) return 0;
if (!ob || ID_IS_LINKED(ob)) return 0;
/* only meshes supported at the moment */
if (ob->type == OB_MESH)

@ -593,7 +593,7 @@ static int edit_constraint_poll_generic(bContext *C, StructRNA *rna_type)
return 0;
}
if (ID_IS_LINKED_DATABLOCK(ob) || (ptr.id.data && ID_IS_LINKED_DATABLOCK(ptr.id.data))) {
if (ID_IS_LINKED(ob) || (ptr.id.data && ID_IS_LINKED(ptr.id.data))) {
CTX_wm_operator_poll_msg_set(C, "Cannot edit library data");
return 0;
}

@ -303,7 +303,7 @@ static void data_transfer_exec_preprocess_objects(
}
me = ob->data;
if (ID_IS_LINKED_DATABLOCK(me)) {
if (ID_IS_LINKED(me)) {
/* Do not transfer to linked data, not supported. */
BKE_reportf(op->reports, RPT_WARNING, "Skipping object '%s', linked data '%s' cannot be modified",
ob->id.name + 2, me->id.name + 2);
@ -333,7 +333,7 @@ static bool data_transfer_exec_is_object_valid(
me->id.tag &= ~LIB_TAG_DOIT;
return true;
}
else if (!ID_IS_LINKED_DATABLOCK(me)) {
else if (!ID_IS_LINKED(me)) {
/* Do not transfer apply operation more than once. */
/* XXX This is not nice regarding vgroups, which are half-Object data... :/ */
BKE_reportf(op->reports, RPT_WARNING,
@ -393,7 +393,7 @@ static int data_transfer_exec(bContext *C, wmOperator *op)
return OPERATOR_FINISHED;
}
if (reverse_transfer && ID_IS_LINKED_DATABLOCK(ob_src->data)) {
if (reverse_transfer && ID_IS_LINKED(ob_src->data)) {
/* Do not transfer to linked data, not supported. */
return OPERATOR_CANCELLED;
}

@ -294,7 +294,7 @@ void ED_object_editmode_enter(bContext *C, int flag)
Object *ob;
bool ok = false;
if (ID_IS_LINKED_DATABLOCK(scene)) return;
if (ID_IS_LINKED(scene)) return;
if ((flag & EM_IGNORE_LAYER) == 0) {
ob = CTX_data_active_object(C); /* active layer checked here for view3d */
@ -356,7 +356,7 @@ void ED_object_editmode_enter(bContext *C, int flag)
* BKE_object_obdata_is_libdata that prevents the bugfix #6614, so
* I add this little hack here.
*/
if (ID_IS_LINKED_DATABLOCK(arm)) {
if (ID_IS_LINKED(arm)) {
error_libdata();
return;
}
@ -440,7 +440,7 @@ static int editmode_toggle_poll(bContext *C)
Object *ob = CTX_data_active_object(C);
/* covers proxies too */
if (ELEM(NULL, ob, ob->data) || ID_IS_LINKED_DATABLOCK(ob->data))
if (ELEM(NULL, ob, ob->data) || ID_IS_LINKED(ob->data))
return 0;
/* if hidden but in edit mode, we still display */
@ -667,7 +667,7 @@ static void copy_attr(Main *bmain, Scene *scene, SceneLayer *sl, short event)
Nurb *nu;
bool do_depgraph_update = false;
if (ID_IS_LINKED_DATABLOCK(scene)) return;
if (ID_IS_LINKED(scene)) return;
if (!(ob = OBACT_NEW(sl))) return;
@ -1258,7 +1258,7 @@ static int shade_smooth_exec(bContext *C, wmOperator *op)
{
data = ob->data;
if (data && ID_IS_LINKED_DATABLOCK(data)) {
if (data && ID_IS_LINKED(data)) {
linked_data = true;
continue;
}
@ -1342,7 +1342,7 @@ static void UNUSED_FUNCTION(image_aspect) (Scene *scene, SceneLayer *sl)
int a, b, done;
if (scene->obedit) return; // XXX get from context
if (ID_IS_LINKED_DATABLOCK(scene)) return;
if (ID_IS_LINKED(scene)) return;
for (base = FIRSTBASE_NEW(sl); base; base = base->next) {
if (TESTBASELIB_NEW(base)) {

@ -827,9 +827,9 @@ int edit_modifier_poll_generic(bContext *C, StructRNA *rna_type, int obtype_flag
PointerRNA ptr = CTX_data_pointer_get_type(C, "modifier", rna_type);
Object *ob = (ptr.id.data) ? ptr.id.data : ED_object_active_context(C);
if (!ob || ID_IS_LINKED_DATABLOCK(ob)) return 0;
if (!ob || ID_IS_LINKED(ob)) return 0;
if (obtype_flag && ((1 << ob->type) & obtype_flag) == 0) return 0;
if (ptr.id.data && ID_IS_LINKED_DATABLOCK(ptr.id.data)) return 0;
if (ptr.id.data && ID_IS_LINKED(ptr.id.data)) return 0;
return 1;
}

@ -303,17 +303,17 @@ static int make_proxy_invoke(bContext *C, wmOperator *op, const wmEvent *event)
Object *ob = ED_object_active_context(C);
/* sanity checks */
if (!scene || ID_IS_LINKED_DATABLOCK(scene) || !ob)
if (!scene || ID_IS_LINKED(scene) || !ob)
return OPERATOR_CANCELLED;
/* Get object to work on - use a menu if we need to... */
if (ob->dup_group && ID_IS_LINKED_DATABLOCK(ob->dup_group)) {
if (ob->dup_group && ID_IS_LINKED(ob->dup_group)) {
/* gives menu with list of objects in group */
/* proxy_group_objects_menu(C, op, ob, ob->dup_group); */
WM_enum_search_invoke(C, op, event);
return OPERATOR_CANCELLED;
}
else if (ID_IS_LINKED_DATABLOCK(ob)) {
else if (ID_IS_LINKED(ob)) {
uiPopupMenu *pup = UI_popup_menu_begin(C, IFACE_("OK?"), ICON_QUESTION);
uiLayout *layout = UI_popup_menu_layout(pup);
@ -1372,7 +1372,7 @@ static int make_links_scene_exec(bContext *C, wmOperator *op)
return OPERATOR_CANCELLED;
}
if (ID_IS_LINKED_DATABLOCK(scene_to)) {
if (ID_IS_LINKED(scene_to)) {
BKE_report(op->reports, RPT_ERROR, "Cannot link objects into a linked scene");
return OPERATOR_CANCELLED;
}
@ -1485,7 +1485,7 @@ static int make_links_data_exec(bContext *C, wmOperator *op)
case MAKE_LINKS_ANIMDATA:
BKE_animdata_copy_id(bmain, (ID *)ob_dst, (ID *)ob_src, false);
if (ob_dst->data && ob_src->data) {
if (ID_IS_LINKED_DATABLOCK(obdata_id)) {
if (ID_IS_LINKED(obdata_id)) {
is_lib = true;
break;
}
@ -1527,7 +1527,7 @@ static int make_links_data_exec(bContext *C, wmOperator *op)
Curve *cu_src = ob_src->data;
Curve *cu_dst = ob_dst->data;
if (ID_IS_LINKED_DATABLOCK(obdata_id)) {
if (ID_IS_LINKED(obdata_id)) {
is_lib = true;
break;
}
@ -1638,7 +1638,7 @@ void OBJECT_OT_make_links_data(wmOperatorType *ot)
static Object *single_object_users_object(Main *bmain, Scene *scene, Object *ob, const bool copy_groups)
{
if (!ID_IS_LINKED_DATABLOCK(ob) && ob->id.us > 1) {
if (!ID_IS_LINKED(ob) && ob->id.us > 1) {
/* base gets copy of object */
Object *obn = ID_NEW_SET(ob, BKE_object_copy(bmain, ob));
@ -1776,7 +1776,7 @@ static void new_id_matar(Main *bmain, Material **matar, const int totcol)
for (a = 0; a < totcol; a++) {
id = (ID *)matar[a];
if (id && !ID_IS_LINKED_DATABLOCK(id)) {
if (id && !ID_IS_LINKED(id)) {
if (id->newid) {
matar[a] = (Material *)id->newid;
id_us_plus(id->newid);
@ -1802,10 +1802,10 @@ static void single_obdata_users(Main *bmain, Scene *scene, SceneLayer *sl, const
FOREACH_OBJECT_FLAG(scene, sl, flag, ob)
{
if (!ID_IS_LINKED_DATABLOCK(ob)) {
if (!ID_IS_LINKED(ob)) {
id = ob->data;
if (id && id->us > 1 && !ID_IS_LINKED_DATABLOCK(id)) {
if (id && id->us > 1 && !ID_IS_LINKED(id)) {
DEG_id_tag_update(&ob->id, OB_RECALC_DATA);
switch (ob->type) {
@ -1880,7 +1880,7 @@ static void single_obdata_users(Main *bmain, Scene *scene, SceneLayer *sl, const
static void single_object_action_users(Scene *scene, SceneLayer *sl, const int flag)
{
FOREACH_OBJECT_FLAG(scene, sl, flag, ob)
if (!ID_IS_LINKED_DATABLOCK(ob)) {
if (!ID_IS_LINKED(ob)) {
DEG_id_tag_update(&ob->id, OB_RECALC_DATA);
BKE_animdata_copy_id_action(&ob->id, false);
}
@ -1894,7 +1894,7 @@ static void single_mat_users(Main *bmain, Scene *scene, SceneLayer *sl, const in
int a, b;
FOREACH_OBJECT_FLAG(scene, sl, flag, ob)
if (!ID_IS_LINKED_DATABLOCK(ob)) {
if (!ID_IS_LINKED(ob)) {
for (a = 1; a <= ob->totcol; a++) {
ma = give_current_material(ob, a);
if (ma) {
@ -2050,7 +2050,7 @@ void ED_object_single_users(Main *bmain, Scene *scene, const bool full, const bo
for (Base *base = scene->base.first; base; base = base->next) {
Object *ob = base->object;
if (!ID_IS_LINKED_DATABLOCK(ob)) {
if (!ID_IS_LINKED(ob)) {
IDP_RelinkProperty(ob->id.properties);
}
}
@ -2148,7 +2148,7 @@ static bool make_local_all__instance_indirect_unused(Main *bmain, Scene *scene,
bool changed = false;
for (ob = bmain->object.first; ob; ob = ob->id.next) {
if (ID_IS_LINKED_DATABLOCK(ob) && (ob->id.us == 0)) {
if (ID_IS_LINKED(ob) && (ob->id.us == 0)) {
Base *base;
id_us_plus(&ob->id);

@ -227,7 +227,7 @@ static int shape_key_mode_poll(bContext *C)
{
Object *ob = ED_object_context(C);
ID *data = (ob) ? ob->data : NULL;
return (ob && !ID_IS_LINKED_DATABLOCK(ob) && data && !ID_IS_LINKED_DATABLOCK(data) && ob->mode != OB_MODE_EDIT);
return (ob && !ID_IS_LINKED(ob) && data && !ID_IS_LINKED(data) && ob->mode != OB_MODE_EDIT);
}
static int shape_key_mode_exists_poll(bContext *C)
@ -236,7 +236,7 @@ static int shape_key_mode_exists_poll(bContext *C)
ID *data = (ob) ? ob->data : NULL;
/* same as shape_key_mode_poll */
return (ob && !ID_IS_LINKED_DATABLOCK(ob) && data && !ID_IS_LINKED_DATABLOCK(data) && ob->mode != OB_MODE_EDIT) &&
return (ob && !ID_IS_LINKED(ob) && data && !ID_IS_LINKED(data) && ob->mode != OB_MODE_EDIT) &&
/* check a keyblock exists */
(BKE_keyblock_from_object(ob) != NULL);
}
@ -248,7 +248,7 @@ static int shape_key_move_poll(bContext *C)
ID *data = (ob) ? ob->data : NULL;
Key *key = BKE_key_from_object(ob);
return (ob && !ID_IS_LINKED_DATABLOCK(ob) && data && !ID_IS_LINKED_DATABLOCK(data) &&
return (ob && !ID_IS_LINKED(ob) && data && !ID_IS_LINKED(data) &&
ob->mode != OB_MODE_EDIT && key && key->totkey > 1);
}
@ -256,7 +256,7 @@ static int shape_key_poll(bContext *C)
{
Object *ob = ED_object_context(C);
ID *data = (ob) ? ob->data : NULL;
return (ob && !ID_IS_LINKED_DATABLOCK(ob) && data && !ID_IS_LINKED_DATABLOCK(data));
return (ob && !ID_IS_LINKED(ob) && data && !ID_IS_LINKED(data));
}
static int shape_key_add_exec(bContext *C, wmOperator *op)

@ -447,7 +447,7 @@ static int apply_objects_internal(
changed = false;
}
if (ID_IS_LINKED_DATABLOCK(obdata)) {
if (ID_IS_LINKED(obdata)) {
BKE_reportf(reports, RPT_ERROR,
"Cannot apply to library data: Object \"%s\", %s \"%s\", aborting",
ob->id.name + 2, BKE_idcode_to_name(GS(obdata->name)), obdata->name + 2);
@ -860,7 +860,7 @@ static int object_origin_set_exec(bContext *C, wmOperator *op)
if (ob->data == NULL) {
/* special support for dupligroups */
if ((ob->transflag & OB_DUPLIGROUP) && ob->dup_group && (ob->dup_group->id.tag & LIB_TAG_DOIT) == 0) {
if (ID_IS_LINKED_DATABLOCK(ob->dup_group)) {
if (ID_IS_LINKED(ob->dup_group)) {
tot_lib_error++;
}
else {
@ -885,7 +885,7 @@ static int object_origin_set_exec(bContext *C, wmOperator *op)
}
}
}
else if (ID_IS_LINKED_DATABLOCK(ob->data)) {
else if (ID_IS_LINKED(ob->data)) {
tot_lib_error++;
}

@ -2464,8 +2464,8 @@ static int vertex_group_poll(bContext *C)
Object *ob = ED_object_context(C);
ID *data = (ob) ? ob->data : NULL;
return (ob && !ID_IS_LINKED_DATABLOCK(ob) &&
data && !ID_IS_LINKED_DATABLOCK(data) &&
return (ob && !ID_IS_LINKED(ob) &&
data && !ID_IS_LINKED(data) &&
OB_TYPE_SUPPORT_VGROUP(ob->type) &&
ob->defbase.first);
}
@ -2474,8 +2474,8 @@ static int vertex_group_supported_poll(bContext *C)
{
Object *ob = ED_object_context(C);
ID *data = (ob) ? ob->data : NULL;
return (ob && !ID_IS_LINKED_DATABLOCK(ob) && OB_TYPE_SUPPORT_VGROUP(ob->type) &&
data && !ID_IS_LINKED_DATABLOCK(data));
return (ob && !ID_IS_LINKED(ob) && OB_TYPE_SUPPORT_VGROUP(ob->type) &&
data && !ID_IS_LINKED(data));
}
static int vertex_group_mesh_poll(bContext *C)
@ -2483,8 +2483,8 @@ static int vertex_group_mesh_poll(bContext *C)
Object *ob = ED_object_context(C);
ID *data = (ob) ? ob->data : NULL;
return (ob && !ID_IS_LINKED_DATABLOCK(ob) &&
data && !ID_IS_LINKED_DATABLOCK(data) &&
return (ob && !ID_IS_LINKED(ob) &&
data && !ID_IS_LINKED(data) &&
ob->type == OB_MESH &&
ob->defbase.first);
}
@ -2493,7 +2493,7 @@ static int UNUSED_FUNCTION(vertex_group_mesh_supported_poll)(bContext *C)
{
Object *ob = ED_object_context(C);
ID *data = (ob) ? ob->data : NULL;
return (ob && !ID_IS_LINKED_DATABLOCK(ob) && ob->type == OB_MESH && data && !ID_IS_LINKED_DATABLOCK(data));
return (ob && !ID_IS_LINKED(ob) && ob->type == OB_MESH && data && !ID_IS_LINKED(data));
}
@ -2502,7 +2502,7 @@ static int UNUSED_FUNCTION(vertex_group_poll_edit) (bContext *C)
Object *ob = ED_object_context(C);
ID *data = (ob) ? ob->data : NULL;
if (!(ob && !ID_IS_LINKED_DATABLOCK(ob) && data && !ID_IS_LINKED_DATABLOCK(data)))
if (!(ob && !ID_IS_LINKED(ob) && data && !ID_IS_LINKED(data)))
return 0;
return BKE_object_is_in_editmode_vgroup(ob);
@ -2514,7 +2514,7 @@ static int vertex_group_vert_poll_ex(bContext *C, const bool needs_select, const
Object *ob = ED_object_context(C);
ID *data = (ob) ? ob->data : NULL;
if (!(ob && !ID_IS_LINKED_DATABLOCK(ob) && data && !ID_IS_LINKED_DATABLOCK(data)))
if (!(ob && !ID_IS_LINKED(ob) && data && !ID_IS_LINKED(data)))
return false;
if (ob_type_flag && (((1 << ob->type) & ob_type_flag)) == 0) {
@ -2575,7 +2575,7 @@ static int vertex_group_vert_select_unlocked_poll(bContext *C)
Object *ob = ED_object_context(C);
ID *data = (ob) ? ob->data : NULL;
if (!(ob && !ID_IS_LINKED_DATABLOCK(ob) && data && !ID_IS_LINKED_DATABLOCK(data)))
if (!(ob && !ID_IS_LINKED(ob) && data && !ID_IS_LINKED(data)))
return 0;
if (!(BKE_object_is_in_editmode_vgroup(ob) ||
@ -2598,7 +2598,7 @@ static int vertex_group_vert_select_mesh_poll(bContext *C)
Object *ob = ED_object_context(C);
ID *data = (ob) ? ob->data : NULL;
if (!(ob && !ID_IS_LINKED_DATABLOCK(ob) && data && !ID_IS_LINKED_DATABLOCK(data)))
if (!(ob && !ID_IS_LINKED(ob) && data && !ID_IS_LINKED(data)))
return 0;
/* only difference to #vertex_group_vert_select_poll */
@ -2790,7 +2790,7 @@ static int vertex_group_select_exec(bContext *C, wmOperator *UNUSED(op))
{
Object *ob = ED_object_context(C);
if (!ob || ID_IS_LINKED_DATABLOCK(ob))
if (!ob || ID_IS_LINKED(ob))
return OPERATOR_CANCELLED;
vgroup_select_verts(ob, 1);

@@ -4761,7 +4761,7 @@ static int particle_edit_toggle_poll(bContext *C)
if (ob == NULL || ob->type != OB_MESH)
return 0;
-if (!ob->data || ID_IS_LINKED_DATABLOCK(ob->data))
+if (!ob->data || ID_IS_LINKED(ob->data))
return 0;
if (CTX_data_edit_object(C))
return 0;

@@ -997,7 +997,7 @@ static void remove_particle_systems_from_object(Object *ob_to)
if (ob_to->type != OB_MESH)
return;
-if (!ob_to->data || ID_IS_LINKED_DATABLOCK(ob_to->data))
+if (!ob_to->data || ID_IS_LINKED(ob_to->data))
return;
for (md = ob_to->modifiers.first; md; md = md_next) {
@@ -1038,7 +1038,7 @@ static bool copy_particle_systems_to_object(const bContext *C,
if (ob_to->type != OB_MESH)
return false;
-if (!ob_to->data || ID_IS_LINKED_DATABLOCK(ob_to->data))
+if (!ob_to->data || ID_IS_LINKED(ob_to->data))
return false;
/* For remapping we need a valid DM.

@@ -171,7 +171,7 @@ int ED_operator_screen_mainwinactive(bContext *C)
int ED_operator_scene_editable(bContext *C)
{
Scene *scene = CTX_data_scene(C);
-if (scene && !ID_IS_LINKED_DATABLOCK(scene))
+if (scene && !ID_IS_LINKED(scene))
return 1;
return 0;
}
@@ -181,7 +181,7 @@ int ED_operator_objectmode(bContext *C)
Scene *scene = CTX_data_scene(C);
Object *obact = CTX_data_active_object(C);
-if (scene == NULL || ID_IS_LINKED_DATABLOCK(scene))
+if (scene == NULL || ID_IS_LINKED(scene))
return 0;
if (CTX_data_edit_object(C))
return 0;
@@ -282,7 +282,7 @@ int ED_operator_node_editable(bContext *C)
{
SpaceNode *snode = CTX_wm_space_node(C);
-if (snode && snode->edittree && !ID_IS_LINKED_DATABLOCK(snode->edittree))
+if (snode && snode->edittree && !ID_IS_LINKED(snode->edittree))
return 1;
return 0;
@@ -344,20 +344,20 @@ int ED_operator_object_active(bContext *C)
int ED_operator_object_active_editable(bContext *C)
{
Object *ob = ED_object_active_context(C);
-return ((ob != NULL) && !ID_IS_LINKED_DATABLOCK(ob) && !ed_object_hidden(ob));
+return ((ob != NULL) && !ID_IS_LINKED(ob) && !ed_object_hidden(ob));
}
int ED_operator_object_active_editable_mesh(bContext *C)
{
Object *ob = ED_object_active_context(C);
-return ((ob != NULL) && !ID_IS_LINKED_DATABLOCK(ob) && !ed_object_hidden(ob) &&
-(ob->type == OB_MESH) && !ID_IS_LINKED_DATABLOCK(ob->data));
+return ((ob != NULL) && !ID_IS_LINKED(ob) && !ed_object_hidden(ob) &&
+(ob->type == OB_MESH) && !ID_IS_LINKED(ob->data));
}
int ED_operator_object_active_editable_font(bContext *C)
{
Object *ob = ED_object_active_context(C);
-return ((ob != NULL) && !ID_IS_LINKED_DATABLOCK(ob) && !ed_object_hidden(ob) &&
+return ((ob != NULL) && !ID_IS_LINKED(ob) && !ed_object_hidden(ob) &&
(ob->type == OB_FONT));
}
@@ -450,8 +450,8 @@ int ED_operator_posemode_local(bContext *C)
if (ED_operator_posemode(C)) {
Object *ob = BKE_object_pose_armature_get(CTX_data_active_object(C));
bArmature *arm = ob->data;
-return !(ID_IS_LINKED_DATABLOCK(&ob->id) ||
-ID_IS_LINKED_DATABLOCK(&arm->id));
+return !(ID_IS_LINKED(&ob->id) ||
+ID_IS_LINKED(&arm->id));
}
return false;
}

@@ -1364,7 +1364,7 @@ static int texture_paint_toggle_poll(bContext *C)
Object *ob = CTX_data_active_object(C);
if (ob == NULL || ob->type != OB_MESH)
return 0;
-if (!ob->data || ID_IS_LINKED_DATABLOCK(ob->data))
+if (!ob->data || ID_IS_LINKED(ob->data))
return 0;
if (CTX_data_edit_object(C))
return 0;

@@ -1115,7 +1115,7 @@ static int paint_poll_test(bContext *C)
Object *ob = CTX_data_active_object(C);
if (ob == NULL || ob->type != OB_MESH)
return 0;
-if (!ob->data || ID_IS_LINKED_DATABLOCK(ob->data))
+if (!ob->data || ID_IS_LINKED(ob->data))
return 0;
if (CTX_data_edit_object(C))
return 0;

@@ -66,7 +66,7 @@ static int edit_sensor_poll(bContext *C)
{
PointerRNA ptr = CTX_data_pointer_get_type(C, "sensor", &RNA_Sensor);
-if (ptr.data && ID_IS_LINKED_DATABLOCK(ptr.id.data)) return 0;
+if (ptr.data && ID_IS_LINKED(ptr.id.data)) return 0;
return 1;
}
@@ -74,7 +74,7 @@ static int edit_controller_poll(bContext *C)
{
PointerRNA ptr = CTX_data_pointer_get_type(C, "controller", &RNA_Controller);
-if (ptr.data && ID_IS_LINKED_DATABLOCK(ptr.id.data)) return 0;
+if (ptr.data && ID_IS_LINKED(ptr.id.data)) return 0;
return 1;
}
@@ -82,7 +82,7 @@ static int edit_actuator_poll(bContext *C)
{
PointerRNA ptr = CTX_data_pointer_get_type(C, "actuator", &RNA_Actuator);
-if (ptr.data && ID_IS_LINKED_DATABLOCK(ptr.id.data)) return 0;
+if (ptr.data && ID_IS_LINKED(ptr.id.data)) return 0;
return 1;
}

@@ -634,7 +634,7 @@ static void outliner_draw_userbuts(uiBlock *block, ARegion *ar, SpaceOops *soops
char buf[16] = "";
int but_flag = UI_BUT_DRAG_LOCK;
-if (ID_IS_LINKED_DATABLOCK(id))
+if (ID_IS_LINKED(id))
but_flag |= UI_BUT_DISABLED;
UI_block_emboss_set(block, UI_EMBOSS_NONE);
@@ -805,7 +805,7 @@ static void tselem_draw_icon_uibut(struct DrawIconArg *arg, int icon)
else {
uiBut *but = uiDefIconBut(arg->block, UI_BTYPE_LABEL, 0, icon, arg->xb, arg->yb, UI_UNIT_X, UI_UNIT_Y, NULL,
0.0, 0.0, 1.0, arg->alpha,
-(arg->id && ID_IS_LINKED_DATABLOCK(arg->id)) ? arg->id->lib->name : "");
+(arg->id && ID_IS_LINKED(arg->id)) ? arg->id->lib->name : "");
if (arg->id)
UI_but_drag_set_id(but, arg->id);
@@ -1443,7 +1443,7 @@ static void outliner_draw_tree_element(
else
offsx += 2 * ufac;
-if (tselem->type == 0 && ID_IS_LINKED_DATABLOCK(tselem->id)) {
+if (tselem->type == 0 && ID_IS_LINKED(tselem->id)) {
if (tselem->id->tag & LIB_TAG_MISSING) {
UI_icon_draw_alpha((float)startx + offsx + 2 * ufac, (float)*starty + 2 * ufac, ICON_LIBRARY_DATA_BROKEN,
alpha_fac);

Some files were not shown because too many files have changed in this diff.