blender/intern/cycles/util/util_task.cpp
Lukas Toenne 31ed71cb6b Performance fix for Cycles: Don't wait in the main UI thread when resetting devices.
When the scene is updated Cycles resets the renderer device, cancelling
all existing tasks. The main thread would wait for all running tasks to
finish before continuing. This is ok when tasks can actually cancel in a
timely fashion. For OSL however, this does not work, since the OSL
shader group optimization takes quite a bit of time and cannot easily
be cancelled once running (on my crappy machine in full debug
mode: ~0.12 seconds for simple node trees). This would lead to very
laggy UI behavior and make it difficult to accurately control elements
such as sliders.

This patch removes the wait condition from the device->task_cancel
method. Instead it just sets the do_cancel flag and returns. To avoid a
backlog in the device's task pool, the BlenderSession::sync function
returns early while the reset is going on (tested in
Session::resetting). Once all existing tasks have finished the do_cancel
flag is finally cleared again (checked in TaskPool::num_decrease).
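
A minimal sketch of that early-out (illustrative only; BlenderSession::sync
and Session::resetting are the names mentioned above, but this body is not
the actual Blender code):

    void BlenderSession::sync()
    {
        /* sketch: while the previous tasks are still cancelling, skip
         * this sync instead of queueing more work behind them */
        if(session->resetting())
            return;

        /* ...regular scene synchronization... */
    }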

Care has to be taken to avoid race conditions on the do_cancel flag,
since it can now be modified outside the TaskPool::cancel function
itself. For this purpose the scope of the TaskPool::num_mutex locks has
been extended: in most cases the mutex is now locked by the TaskPool
itself before calling TaskScheduler methods, instead of only locking
inside the num_increase/num_decrease functions themselves. The only
occurrence of a lock outside of the TaskPool methods is in
TaskScheduler::thread_run.
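
Schematically, the convention is now as follows (simplified sketch; the
"before" shape is reconstructed from this description, the "after" shape
is the real code in the file below):

    /* before this patch (sketch): the counter update locked internally */
    void TaskPool::num_decrease(int done)
    {
        thread_scoped_lock num_lock(num_mutex);
        num -= done;
        /* ... */
    }

    /* after: callers take num_mutex first, so num and do_cancel are
     * always accessed under the same lock */
    void TaskPool::cancel()
    {
        thread_scoped_lock num_lock(num_mutex);
        do_cancel = true;
        TaskScheduler::clear(this); /* ends in num_decrease(), lock still held */
    }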

This patch is most useful in combination with the OSL renderer mode, so
it can probably wait until after the 2.64 release. SVM tasks tend to be
cancelled quickly, so the effect is less noticeable.
2012-09-11 11:41:51 +00:00


/*
 * Copyright 2011, Blender Foundation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include "util_debug.h"
#include "util_foreach.h"
#include "util_system.h"
#include "util_task.h"
CCL_NAMESPACE_BEGIN
/* Task Pool */

TaskPool::TaskPool()
{
    num = 0;
    do_cancel = false;
}

TaskPool::~TaskPool()
{
    stop();
}

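/* takes num_mutex before calling into the scheduler, so that the matching
 * num_increase() runs with the pool lock held (avoids races on do_cancel) */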
void TaskPool::push(Task *task, bool front)
{
    thread_scoped_lock num_lock(num_mutex);

    TaskScheduler::Entry entry;
    entry.task = task;
    entry.pool = this;

    TaskScheduler::push(entry, front);
}

void TaskPool::push(const TaskRunFunction& run, bool front)
{
    push(new Task(run), front);
}

void TaskPool::wait_work()
{
    thread_scoped_lock num_lock(num_mutex);

    while(num != 0) {
        num_lock.unlock();

        thread_scoped_lock queue_lock(TaskScheduler::queue_mutex);

        /* find task from this pool. if we get a task from another pool,
         * we can get into deadlock */
        TaskScheduler::Entry work_entry;
        bool found_entry = false;
        list<TaskScheduler::Entry>::iterator it;

        for(it = TaskScheduler::queue.begin(); it != TaskScheduler::queue.end(); it++) {
            TaskScheduler::Entry& entry = *it;

            if(entry.pool == this) {
                work_entry = entry;
                found_entry = true;
                TaskScheduler::queue.erase(it);
                break;
            }
        }

        queue_lock.unlock();

        /* if found task, do it, otherwise wait until other tasks are done */
        if(found_entry) {
            /* run task */
            work_entry.task->run();

            /* delete task */
            delete work_entry.task;

            /* notify pool task was done; num_decrease() expects num_mutex
             * to be held by the caller, so relock around the call */
            num_lock.lock();
            num_decrease(1);
            num_lock.unlock();
        }

        num_lock.lock();
        if(num == 0)
            break;

        if(!found_entry)
            num_cond.wait(num_lock);
    }
}

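/* cancellation no longer blocks: set the flag, clear queued tasks and
 * return; do_cancel is cleared again in num_decrease() once all running
 * tasks have finished */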
void TaskPool::cancel()
{
    thread_scoped_lock num_lock(num_mutex);

    do_cancel = true;
    TaskScheduler::clear(this);
}

void TaskPool::stop()
{
    thread_scoped_lock num_lock(num_mutex);

    TaskScheduler::clear(this);

    assert(num == 0);
}

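/* true from the moment a cancel is requested until the pool has emptied */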
bool TaskPool::cancelled()
{
    return do_cancel;
}

void TaskPool::num_decrease(int done)
{
    num -= done;
    assert(num >= 0);
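
    /* once all tasks in the pool are done, the cancel request has been
     * fully handled and the flag can be cleared; callers hold num_mutex,
     * which prevents races on do_cancel */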
    if(num == 0) {
        do_cancel = false;
        num_cond.notify_all();
    }
}

void TaskPool::num_increase()
{
    num++;
    num_cond.notify_all();
}

/* Task Scheduler */

thread_mutex TaskScheduler::mutex;
int TaskScheduler::users = 0;
vector<thread*> TaskScheduler::threads;
vector<int> TaskScheduler::thread_level;
volatile bool TaskScheduler::do_exit = false;

list<TaskScheduler::Entry> TaskScheduler::queue;
thread_mutex TaskScheduler::queue_mutex;
thread_condition_variable TaskScheduler::queue_cond;

void TaskScheduler::init(int num_threads)
{
    thread_scoped_lock lock(mutex);

    /* multiple cycles instances can use this task scheduler, sharing the same
     * threads, so we keep track of the number of users. */
    if(users == 0) {
        do_exit = false;

        /* launch threads that will be waiting for work */
        if(num_threads == 0)
            num_threads = system_cpu_thread_count();

        threads.resize(num_threads);
        thread_level.resize(num_threads);

        for(size_t i = 0; i < threads.size(); i++) {
            threads[i] = new thread(function_bind(&TaskScheduler::thread_run, i));
            thread_level[i] = 0;
        }
    }

    users++;
}

void TaskScheduler::exit()
{
    thread_scoped_lock lock(mutex);

    users--;

    if(users == 0) {
        /* stop all waiting threads */
        do_exit = true;
        TaskScheduler::queue_cond.notify_all();

        /* delete threads */
        foreach(thread *t, threads) {
            t->join();
            delete t;
        }

        threads.clear();
        thread_level.clear();
    }
}

bool TaskScheduler::thread_wait_pop(Entry& entry)
{
thread_scoped_lock queue_lock(queue_mutex);
while(queue.empty() && !do_exit)
queue_cond.wait(queue_lock);
if(queue.empty()) {
assert(do_exit);
return false;
}
entry = queue.front();
queue.pop_front();
return true;
}
void TaskScheduler::thread_run(int thread_id)
{
    Entry entry;

    /* todo: test affinity/denormal mask */

    /* keep popping off tasks */
    while(thread_wait_pop(entry)) {
        /* run task */
        entry.task->run();

        /* delete task */
        delete entry.task;

        /* notify pool task was done */
        {
            /* not called from TaskPool, have to explicitly lock the mutex here */
            thread_scoped_lock num_lock(entry.pool->num_mutex);
            entry.pool->num_decrease(1);
        }
    }
}

void TaskScheduler::push(Entry& entry, bool front)
{
entry.pool->num_increase();
/* add entry to queue */
TaskScheduler::queue_mutex.lock();
if(front)
TaskScheduler::queue.push_front(entry);
else
TaskScheduler::queue.push_back(entry);
TaskScheduler::queue_cond.notify_one();
TaskScheduler::queue_mutex.unlock();
}
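/* called with pool->num_mutex already held (from TaskPool::cancel/stop),
 * so the final num_decrease() below runs under the pool lock */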
void TaskScheduler::clear(TaskPool *pool)
{
    thread_scoped_lock queue_lock(TaskScheduler::queue_mutex);

    /* erase all tasks from this pool from the queue */
    list<Entry>::iterator it = queue.begin();
    int done = 0;

    while(it != queue.end()) {
        Entry& entry = *it;

        if(entry.pool == pool) {
            done++;
            delete entry.task;
            it = queue.erase(it);
        }
        else
            it++;
    }

    queue_lock.unlock();

    /* notify done */
    pool->num_decrease(done);
}

CCL_NAMESPACE_END