Cycles: require Experimental to be set to enable CUDA on cards with shader model
lower than 1.3, since we're not officially supporting these. We're already not
providing CUDA binaries for these, so better make it clear when compiling from
source too.
Brecht Van Lommel 2011-12-12 22:51:35 +00:00
parent 94bc2b0cff
commit 9e01abf777
7 changed files with 32 additions and 11 deletions
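
The gist of the change, as a minimal standalone sketch (not code from this commit; device ordinal 0, the helper name and the error reporting are illustrative assumptions): query the card's compute capability through the CUDA driver API and accept shader model below 1.3 only when the experimental feature set is enabled.

/* Minimal standalone sketch of the new gate (not part of this commit). */
#include <cuda.h>
#include <cstdio>

static bool cuda_device_supported(bool experimental)
{
	CUdevice dev;
	int major, minor;

	/* assumes a single call site; the real code initializes the driver API once */
	if(cuInit(0) != CUDA_SUCCESS || cuDeviceGet(&dev, 0) != CUDA_SUCCESS)
		return false;

	cuDeviceComputeCapability(&major, &minor, dev);

	/* shader model 1.0-1.2 is only allowed with the Experimental feature set */
	if(!experimental && major <= 1 && minor <= 2) {
		fprintf(stderr, "CUDA device supported only with shader model 1.3 or up, found %d.%d.\n", major, minor);
		return false;
	}

	return true;
}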

@@ -257,7 +257,7 @@ SessionParams BlenderSync::get_session_params(BL::Scene b_scene, bool background
PointerRNA cscene = RNA_pointer_get(&b_scene.ptr, "cycles");
/* feature set */
-bool experimental = (RNA_enum_get(&cscene, "feature_set") != 0);
+params.experimental = (RNA_enum_get(&cscene, "feature_set") != 0);
/* device type */
params.device_type = DEVICE_CPU;
@@ -266,14 +266,14 @@ SessionParams BlenderSync::get_session_params(BL::Scene b_scene, bool background
vector<DeviceType> types = Device::available_types();
DeviceType dtype;
-if(!experimental || RNA_enum_get(&cscene, "gpu_type") == 0)
+if(!params.experimental || RNA_enum_get(&cscene, "gpu_type") == 0)
dtype = DEVICE_CUDA;
else
dtype = DEVICE_OPENCL;
if(device_type_available(types, dtype))
params.device_type = dtype;
-else if(experimental && device_type_available(types, DEVICE_OPENCL))
+else if(params.experimental && device_type_available(types, DEVICE_OPENCL))
params.device_type = DEVICE_OPENCL;
else if(device_type_available(types, DEVICE_CUDA))
params.device_type = DEVICE_CUDA;
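
Read as a whole, the selection above prefers the GPU type the user chose, falls back to OpenCL only when Experimental is enabled, then to CUDA, and otherwise stays on the CPU default. A compact restatement with local stand-ins for the Cycles types (pick_device_type and the local enum are illustrative, not the real API):

#include <vector>
#include <algorithm>

enum DeviceType { DEVICE_CPU, DEVICE_CUDA, DEVICE_OPENCL };

static bool device_type_available(const std::vector<DeviceType>& types, DeviceType type)
{
	return std::find(types.begin(), types.end(), type) != types.end();
}

static DeviceType pick_device_type(bool experimental, bool prefer_opencl,
                                   const std::vector<DeviceType>& types)
{
	/* non-experimental builds only ever request CUDA */
	DeviceType dtype = (!experimental || !prefer_opencl)? DEVICE_CUDA: DEVICE_OPENCL;

	if(device_type_available(types, dtype))
		return dtype;
	else if(experimental && device_type_available(types, DEVICE_OPENCL))
		return DEVICE_OPENCL;
	else if(device_type_available(types, DEVICE_CUDA))
		return DEVICE_CUDA;

	return DEVICE_CPU;
}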

@@ -112,7 +112,7 @@ public:
virtual void *osl_memory() { return NULL; }
/* load/compile kernels, must be called before adding tasks */
-virtual bool load_kernels() { return true; }
+virtual bool load_kernels(bool experimental) { return true; }
/* tasks */
virtual void task_add(DeviceTask& task) = 0;

@@ -214,6 +214,21 @@ public:
return string("CUDA ") + deviceName;
}
+bool support_device(bool experimental)
+{
+	if(!experimental) {
+		int major, minor;
+		cuDeviceComputeCapability(&major, &minor, cuDevId);
+		if(major <= 1 && minor <= 2) {
+			cuda_error(string_printf("CUDA device supported only with shader model 1.3 or up, found %d.%d.", major, minor));
+			return false;
+		}
+	}
+	return true;
+}
string compile_kernel()
{
/* compute cubin name */
@@ -236,11 +251,11 @@ public:
if(path_exists(cubin))
return cubin;
-#ifdef WITH_CUDA_BINARIES
+#if defined(WITH_CUDA_BINARIES) && defined(_WIN32)
-if(major <= 1 && minor <= 2)
-cuda_error(string_printf("CUDA device supported only with shader model 1.3 or up, found %d.%d.", major, minor));
-else
-cuda_error("CUDA binary kernel for this graphics card not found.");
cuda_error("CUDA binary kernel for this graphics card shader model (%d.%d) not found.", major, minor);
return "";
#else
/* if not, find CUDA compiler */
@@ -283,12 +298,15 @@ public:
#endif
}
-bool load_kernels()
+bool load_kernels(bool experimental)
{
/* check if cuda init succeeded */
if(cuContext == 0)
return false;
+if(!support_device(experimental))
+	return false;
/* get kernel */
string cubin = compile_kernel();

@@ -132,10 +132,10 @@ public:
return desc.str();
}
-bool load_kernels()
+bool load_kernels(bool experimental)
{
foreach(SubDevice& sub, devices)
-if(!sub.device->load_kernels())
+if(!sub.device->load_kernels(experimental))
return false;
return true;

@@ -365,7 +365,7 @@ public:
return md5.get_hex();
}
-bool load_kernels()
+bool load_kernels(bool experimental)
{
/* verify if device was initialized */
if(!device_initialized) {

@@ -410,7 +410,7 @@ void Session::run()
/* load kernels */
progress.set_status("Loading render kernels (may take a few minutes the first time)");
-if(!device->load_kernels()) {
+if(!device->load_kernels(params.experimental)) {
string message = device->error_message();
if(message == "")
message = "Failed loading render kernel, see console for errors";

@@ -43,6 +43,7 @@ public:
string output_path;
bool progressive;
+bool experimental;
int samples;
int tile_size;
int min_size;
@@ -59,6 +60,7 @@ public:
output_path = "";
progressive = false;
+experimental = false;
samples = INT_MAX;
tile_size = 64;
min_size = 64;
@@ -75,6 +77,7 @@ public:
&& output_path == params.output_path
/* && samples == params.samples */
&& progressive == params.progressive
+&& experimental == params.experimental
&& tile_size == params.tile_size
&& min_size == params.min_size
&& threads == params.threads
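
Finally, the reason experimental joins the comparison above: two parameter sets that differ only in the feature set must count as modified, so the renderer can reset the session and reload kernels with the new flag. A self-contained sketch using a local stand-in struct (not the real SessionParams):

#include <cassert>

struct Params {
	bool progressive;
	bool experimental;
	int tile_size;

	bool modified(const Params& other) const
	{
		/* mirrors the && chain above: any differing field counts as modified */
		return !(progressive == other.progressive
		      && experimental == other.experimental
		      && tile_size == other.tile_size);
	}
};

int main()
{
	Params a = {false, false, 64};
	Params b = a;
	b.experimental = true;

	/* before this commit the flag was not compared, so toggling the
	   Experimental feature set would not force a kernel reload */
	assert(a.modified(b));
	return 0;
}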