Cycles: Solve possible issues with running out of memory in the stack allocator
The policy here is a bit more complicated: if the tree becomes too deep we are forced to create a leaf node, and the size of that leaf cannot be predicted very well, which makes it quite tricky to use a single stack array for it. Made it a more official feature that StackAllocator falls back to the heap when it runs out of stack memory. This is still much better than always using the heap allocator.
This commit is contained in:
parent
5ab3a97dbb
commit
be2186ad62
@ -502,8 +502,6 @@ BVHNode *BVHBuild::create_primitive_leaf_node(const int *p_type,
|
||||
|
||||
BVHNode* BVHBuild::create_leaf_node(const BVHRange& range)
|
||||
{
|
||||
const int MAX_ITEMS_PER_LEAF = 16;
|
||||
|
||||
/* This is a bit overallocating here (considering leaf size into account),
|
||||
* but chunk-based re-allocation in vector makes it difficult to use small
|
||||
* size of stack storage here. Some tweaks are possible tho.
|
||||
@ -513,11 +511,13 @@ BVHNode* BVHBuild::create_leaf_node(const BVHRange& range)
|
||||
* and lots of cache misses.
|
||||
* - If the size is too small, then we can run out of memory
|
||||
* allowed to be used by vector.
|
||||
* In practice it wouldn't mean crash, just allocator will fallback
|
||||
* to heap which is slower.
|
||||
* - Optimistic re-allocation in STL could jump us out of stack usage
|
||||
* because re-allocation happens in chunks and size of those chunks we
|
||||
* can not control.
|
||||
*/
|
||||
typedef StackAllocator<MAX_ITEMS_PER_LEAF * 16, int> LeafStackAllocator;
|
||||
typedef StackAllocator<256, int> LeafStackAllocator;
|
||||
|
||||
vector<int, LeafStackAllocator> p_type[PRIMITIVE_NUM_TOTAL];
|
||||
vector<int, LeafStackAllocator> p_index[PRIMITIVE_NUM_TOTAL];
|
||||
|
@ -54,28 +54,35 @@ public:
|
||||
/* Allocate storage for `n` objects of type T.
 *
 * Fast path: carve the memory out of the fixed-size stack buffer `data_`
 * by bumping `pointer_`.  When the request does not fit into the remaining
 * stack storage we fall back to a regular heap allocation (Blender's
 * guarded allocator when WITH_BLENDER_GUARDEDALLOC is defined), which is
 * slower but keeps over-sized requests working instead of failing.
 *
 * `hint` exists only for std::allocator interface compatibility and is
 * ignored.  Returns NULL when `n` is zero. */
T *allocate(size_t n, const void *hint = 0)
{
	(void)hint;
	if(n == 0) {
		return NULL;
	}
	if(pointer_ + n >= SIZE) {
		/* Stack storage exhausted: official fallback to the (slower) heap
		 * allocator.  The matching deallocate() detects heap pointers by
		 * checking whether they lie outside of `data_`. */
		size_t size = n * sizeof(T);
		util_guarded_mem_alloc(size);
#ifdef WITH_BLENDER_GUARDEDALLOC
		return (T*)MEM_mallocN_aligned(size, 16, "Cycles Alloc");
#else
		return (T*)malloc(size);
#endif
	}
	T *mem = &data_[pointer_];
	pointer_ += n;
	return mem;
}
|
||||
|
||||
void deallocate(T *p, size_t /*n*/)
|
||||
void deallocate(T *p, size_t n)
|
||||
{
|
||||
if(p == NULL) {
|
||||
return;
|
||||
}
|
||||
if(p < data_ || p >= data_ + SIZE) {
|
||||
/* Again this is just a safety feature for the release builds. */
|
||||
assert(!"Should never happen");
|
||||
util_guarded_mem_free(n * sizeof(T));
|
||||
#ifdef WITH_BLENDER_GUARDEDALLOC
|
||||
MEM_freeN(p);
|
||||
#else
|
||||
free(p);
|
||||
#endif
|
||||
return;
|
||||
}
|
||||
/* We don't support memory free for the stack allocator. */
|
||||
|
Loading…
Reference in New Issue
Block a user