Cleanup: Curves draw cache renaming, use references

Also change some remaining cases of "hair object" to "curves object".
author Hans Goudey 2022-03-30 18:25:06 -05:00
parent 4edde244da
commit 8466fd4bd1
5 changed files with 130 additions and 135 deletions
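
The pattern applied throughout this cleanup is passing the curves data and its batch cache by reference instead of by pointer wherever null is not a meaningful input, and renaming the hair-era types to match the Curves object. A minimal sketch of that pointer-to-reference refactor, using hypothetical `Thing`/`ThingCache` types for illustration rather than the actual Blender structs:

/* Hypothetical types for illustration only; not Blender's Curves/CurvesBatchCache. */
struct ThingCache {
  bool is_dirty;
};

struct Thing {
  void *batch_cache;
};

/* Before: pointer parameter, so callers could in principle pass null. */
static bool thing_cache_valid_ptr(const Thing *thing)
{
  const ThingCache *cache = static_cast<const ThingCache *>(thing->batch_cache);
  return cache && !cache->is_dirty;
}

/* After: reference parameter documents that null is never a valid input, and
 * const marks the function as read-only, matching the style used in this diff. */
static bool thing_cache_valid(const Thing &thing)
{
  const ThingCache *cache = static_cast<const ThingCache *>(thing.batch_cache);
  return cache && !cache->is_dirty;
}

The same shape shows up below for `curves_batch_cache_valid`, `curves_batch_cache_init`, and the `ensure_*` helpers, while the outermost `DRW_*` entry points keep their pointer signatures.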

@@ -26,7 +26,7 @@ def geometry_node_group_empty_new():
 def geometry_modifier_poll(context):
     ob = context.object
-    # Test object support for geometry node modifier (No hair object support yet)
+    # Test object support for geometry node modifier (No curves object support yet)
     if not ob or ob.type not in {'MESH', 'POINTCLOUD', 'VOLUME', 'CURVE', 'FONT'}:
         return False

@@ -4000,7 +4000,7 @@ void BKE_object_foreach_display_point(Object *ob,
                                       void (*func_cb)(const float[3], void *),
                                       void *user_data)
 {
-  /* TODO: pointcloud and hair objects support */
+  /* TODO: pointcloud and curves object support */
   const Mesh *mesh_eval = BKE_object_get_evaluated_mesh(ob);
   float co[3];

@@ -4,7 +4,7 @@
 /** \file
  * \ingroup draw
  *
- * \brief Hair API for render engines
+ * \brief Curves API for render engines
  */

 #include <cstring>
@@ -35,33 +35,29 @@ using blender::float3;
 using blender::IndexRange;
 using blender::Span;

-static void curves_batch_cache_clear(Curves *curves);
-
 /* ---------------------------------------------------------------------- */
-/* Hair GPUBatch Cache */
+/* Curves GPUBatch Cache */

-struct HairBatchCache {
+struct CurvesBatchCache {
   ParticleHairCache hair;

-  /* settings to determine if cache is invalid */
+  /* To determine if cache is invalid. */
   bool is_dirty;
 };

-/* GPUBatch cache management. */
-
-static bool curves_batch_cache_valid(Curves *curves)
+static bool curves_batch_cache_valid(const Curves &curves)
 {
-  HairBatchCache *cache = static_cast<HairBatchCache *>(curves->batch_cache);
+  const CurvesBatchCache *cache = static_cast<CurvesBatchCache *>(curves.batch_cache);
   return (cache && cache->is_dirty == false);
 }

-static void curves_batch_cache_init(Curves *curves)
+static void curves_batch_cache_init(Curves &curves)
 {
-  HairBatchCache *cache = static_cast<HairBatchCache *>(curves->batch_cache);
+  CurvesBatchCache *cache = static_cast<CurvesBatchCache *>(curves.batch_cache);
   if (!cache) {
-    cache = MEM_cnew<HairBatchCache>(__func__);
-    curves->batch_cache = cache;
+    cache = MEM_cnew<CurvesBatchCache>(__func__);
+    curves.batch_cache = cache;
   }
   else {
     memset(cache, 0, sizeof(*cache));
@@ -70,23 +66,33 @@ static void curves_batch_cache_init(Curves *curves)
   cache->is_dirty = false;
 }

+static void curves_batch_cache_clear(Curves &curves)
+{
+  CurvesBatchCache *cache = static_cast<CurvesBatchCache *>(curves.batch_cache);
+  if (!cache) {
+    return;
+  }
+  particle_batch_cache_clear_hair(&cache->hair);
+}
+
 void DRW_curves_batch_cache_validate(Curves *curves)
 {
-  if (!curves_batch_cache_valid(curves)) {
-    curves_batch_cache_clear(curves);
-    curves_batch_cache_init(curves);
+  if (!curves_batch_cache_valid(*curves)) {
+    curves_batch_cache_clear(*curves);
+    curves_batch_cache_init(*curves);
   }
 }

-static HairBatchCache *curves_batch_cache_get(Curves *curves)
+static CurvesBatchCache &curves_batch_cache_get(Curves &curves)
 {
-  DRW_curves_batch_cache_validate(curves);
-  return static_cast<HairBatchCache *>(curves->batch_cache);
+  DRW_curves_batch_cache_validate(&curves);
+  return *static_cast<CurvesBatchCache *>(curves.batch_cache);
 }

 void DRW_curves_batch_cache_dirty_tag(Curves *curves, int mode)
 {
-  HairBatchCache *cache = static_cast<HairBatchCache *>(curves->batch_cache);
+  CurvesBatchCache *cache = static_cast<CurvesBatchCache *>(curves->batch_cache);
   if (cache == nullptr) {
     return;
   }
@@ -99,52 +105,42 @@ void DRW_curves_batch_cache_dirty_tag(Curves *curves, int mode)
   }
 }

-static void curves_batch_cache_clear(Curves *curves)
-{
-  HairBatchCache *cache = static_cast<HairBatchCache *>(curves->batch_cache);
-  if (!cache) {
-    return;
-  }
-  particle_batch_cache_clear_hair(&cache->hair);
-}
-
 void DRW_curves_batch_cache_free(Curves *curves)
 {
-  curves_batch_cache_clear(curves);
+  curves_batch_cache_clear(*curves);
   MEM_SAFE_FREE(curves->batch_cache);
 }

-static void ensure_seg_pt_count(Curves *curves, ParticleHairCache *curves_cache)
+static void ensure_seg_pt_count(const Curves &curves, ParticleHairCache &curves_cache)
 {
-  if ((curves_cache->pos != nullptr && curves_cache->indices != nullptr) ||
-      (curves_cache->proc_point_buf != nullptr)) {
+  if ((curves_cache.pos != nullptr && curves_cache.indices != nullptr) ||
+      (curves_cache.proc_point_buf != nullptr)) {
     return;
   }

-  curves_cache->strands_len = curves->geometry.curve_size;
-  curves_cache->elems_len = curves->geometry.point_size + curves->geometry.curve_size;
-  curves_cache->point_len = curves->geometry.point_size;
+  curves_cache.strands_len = curves.geometry.curve_size;
+  curves_cache.elems_len = curves.geometry.point_size + curves.geometry.curve_size;
+  curves_cache.point_len = curves.geometry.point_size;
 }

-static void curves_batch_cache_fill_segments_proc_pos(Curves *curves,
-                                                       GPUVertBufRaw *attr_step,
-                                                       GPUVertBufRaw *length_step)
+static void curves_batch_cache_fill_segments_proc_pos(const Curves &curves_id,
+                                                       GPUVertBufRaw &attr_step,
+                                                       GPUVertBufRaw &length_step)
 {
   /* TODO: use hair radius layer if available. */
-  const int curve_size = curves->geometry.curve_size;
-  const blender::bke::CurvesGeometry &geometry = blender::bke::CurvesGeometry::wrap(
-      curves->geometry);
-  Span<float3> positions = geometry.positions();
+  const int curve_size = curves_id.geometry.curve_size;
+  const blender::bke::CurvesGeometry &curves = blender::bke::CurvesGeometry::wrap(
+      curves_id.geometry);
+  Span<float3> positions = curves.positions();

   for (const int i : IndexRange(curve_size)) {
-    const IndexRange curve_range = geometry.points_for_curve(i);
+    const IndexRange curve_range = curves.points_for_curve(i);
     Span<float3> curve_positions = positions.slice(curve_range);

     float total_len = 0.0f;
     float *seg_data_first;

     for (const int i_curve : curve_positions.index_range()) {
-      float *seg_data = (float *)GPU_vertbuf_raw_step(attr_step);
+      float *seg_data = (float *)GPU_vertbuf_raw_step(&attr_step);
       copy_v3_v3(seg_data, curve_positions[i_curve]);
       if (i_curve == 0) {
         seg_data_first = seg_data;
@@ -156,7 +152,7 @@ static void curves_batch_cache_fill_segments_proc_pos(Curves *curves,
       seg_data[3] = total_len;
     }
     /* Assign length value. */
-    *(float *)GPU_vertbuf_raw_step(length_step) = total_len;
+    *(float *)GPU_vertbuf_raw_step(&length_step) = total_len;
     if (total_len > 0.0f) {
       /* Divide by total length to have a [0-1] number. */
       for ([[maybe_unused]] const int i_curve : curve_positions.index_range()) {
@@ -167,67 +163,67 @@ static void curves_batch_cache_fill_segments_proc_pos(Curves *curves,
   }
 }

-static void curves_batch_cache_ensure_procedural_pos(Curves *curves,
-                                                      ParticleHairCache *cache,
+static void curves_batch_cache_ensure_procedural_pos(Curves &curves,
+                                                      ParticleHairCache &cache,
                                                       GPUMaterial *gpu_material)
 {
-  if (cache->proc_point_buf == nullptr) {
-    /* initialize vertex format */
+  if (cache.proc_point_buf == nullptr) {
+    /* Initialize vertex format. */
     GPUVertFormat format = {0};
     uint pos_id = GPU_vertformat_attr_add(&format, "posTime", GPU_COMP_F32, 4, GPU_FETCH_FLOAT);

-    cache->proc_point_buf = GPU_vertbuf_create_with_format(&format);
-    GPU_vertbuf_data_alloc(cache->proc_point_buf, cache->point_len);
+    cache.proc_point_buf = GPU_vertbuf_create_with_format(&format);
+    GPU_vertbuf_data_alloc(cache.proc_point_buf, cache.point_len);

     GPUVertBufRaw point_step;
-    GPU_vertbuf_attr_get_raw_data(cache->proc_point_buf, pos_id, &point_step);
+    GPU_vertbuf_attr_get_raw_data(cache.proc_point_buf, pos_id, &point_step);

     GPUVertFormat length_format = {0};
     uint length_id = GPU_vertformat_attr_add(
         &length_format, "hairLength", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);

-    cache->proc_length_buf = GPU_vertbuf_create_with_format(&length_format);
-    GPU_vertbuf_data_alloc(cache->proc_length_buf, cache->strands_len);
+    cache.proc_length_buf = GPU_vertbuf_create_with_format(&length_format);
+    GPU_vertbuf_data_alloc(cache.proc_length_buf, cache.strands_len);

     GPUVertBufRaw length_step;
-    GPU_vertbuf_attr_get_raw_data(cache->proc_length_buf, length_id, &length_step);
+    GPU_vertbuf_attr_get_raw_data(cache.proc_length_buf, length_id, &length_step);

-    curves_batch_cache_fill_segments_proc_pos(curves, &point_step, &length_step);
+    curves_batch_cache_fill_segments_proc_pos(curves, point_step, length_step);

     /* Create vbo immediately to bind to texture buffer. */
-    GPU_vertbuf_use(cache->proc_point_buf);
-    cache->point_tex = GPU_texture_create_from_vertbuf("hair_point", cache->proc_point_buf);
+    GPU_vertbuf_use(cache.proc_point_buf);
+    cache.point_tex = GPU_texture_create_from_vertbuf("hair_point", cache.proc_point_buf);
   }

-  if (gpu_material && cache->proc_length_buf != nullptr && cache->length_tex) {
+  if (gpu_material && cache.proc_length_buf != nullptr && cache.length_tex) {
     ListBase gpu_attrs = GPU_material_attributes(gpu_material);
     LISTBASE_FOREACH (GPUMaterialAttribute *, attr, &gpu_attrs) {
       if (attr->type == CD_HAIRLENGTH) {
-        GPU_vertbuf_use(cache->proc_length_buf);
-        cache->length_tex = GPU_texture_create_from_vertbuf("hair_length", cache->proc_length_buf);
+        GPU_vertbuf_use(cache.proc_length_buf);
+        cache.length_tex = GPU_texture_create_from_vertbuf("hair_length", cache.proc_length_buf);
         break;
       }
     }
   }
 }

-static void curves_batch_cache_fill_strands_data(Curves *curves,
-                                                  GPUVertBufRaw *data_step,
-                                                  GPUVertBufRaw *seg_step)
+static void curves_batch_cache_fill_strands_data(const Curves &curves_id,
+                                                  GPUVertBufRaw &data_step,
+                                                  GPUVertBufRaw &seg_step)
 {
-  const blender::bke::CurvesGeometry &geometry = blender::bke::CurvesGeometry::wrap(
-      curves->geometry);
+  const blender::bke::CurvesGeometry &curves = blender::bke::CurvesGeometry::wrap(
+      curves_id.geometry);

-  for (const int i : IndexRange(geometry.curves_num())) {
-    const IndexRange curve_range = geometry.points_for_curve(i);
-    *(uint *)GPU_vertbuf_raw_step(data_step) = curve_range.start();
-    *(ushort *)GPU_vertbuf_raw_step(seg_step) = curve_range.size() - 1;
+  for (const int i : IndexRange(curves.curves_num())) {
+    const IndexRange curve_range = curves.points_for_curve(i);
+    *(uint *)GPU_vertbuf_raw_step(&data_step) = curve_range.start();
+    *(ushort *)GPU_vertbuf_raw_step(&seg_step) = curve_range.size() - 1;
   }
 }

-static void curves_batch_cache_ensure_procedural_strand_data(Curves *curves,
-                                                              ParticleHairCache *cache)
+static void curves_batch_cache_ensure_procedural_strand_data(Curves &curves,
+                                                              ParticleHairCache &cache)
 {
   GPUVertBufRaw data_step, seg_step;
@@ -237,77 +233,76 @@ static void curves_batch_cache_ensure_procedural_strand_data(Curves *curves,
   GPUVertFormat format_seg = {0};
   uint seg_id = GPU_vertformat_attr_add(&format_seg, "data", GPU_COMP_U16, 1, GPU_FETCH_INT);

-  /* Strand Data */
-  cache->proc_strand_buf = GPU_vertbuf_create_with_format(&format_data);
-  GPU_vertbuf_data_alloc(cache->proc_strand_buf, cache->strands_len);
-  GPU_vertbuf_attr_get_raw_data(cache->proc_strand_buf, data_id, &data_step);
+  /* Curve Data. */
+  cache.proc_strand_buf = GPU_vertbuf_create_with_format(&format_data);
+  GPU_vertbuf_data_alloc(cache.proc_strand_buf, cache.strands_len);
+  GPU_vertbuf_attr_get_raw_data(cache.proc_strand_buf, data_id, &data_step);

-  cache->proc_strand_seg_buf = GPU_vertbuf_create_with_format(&format_seg);
-  GPU_vertbuf_data_alloc(cache->proc_strand_seg_buf, cache->strands_len);
-  GPU_vertbuf_attr_get_raw_data(cache->proc_strand_seg_buf, seg_id, &seg_step);
+  cache.proc_strand_seg_buf = GPU_vertbuf_create_with_format(&format_seg);
+  GPU_vertbuf_data_alloc(cache.proc_strand_seg_buf, cache.strands_len);
+  GPU_vertbuf_attr_get_raw_data(cache.proc_strand_seg_buf, seg_id, &seg_step);

-  curves_batch_cache_fill_strands_data(curves, &data_step, &seg_step);
+  curves_batch_cache_fill_strands_data(curves, data_step, seg_step);

   /* Create vbo immediately to bind to texture buffer. */
-  GPU_vertbuf_use(cache->proc_strand_buf);
-  cache->strand_tex = GPU_texture_create_from_vertbuf("curves_strand", cache->proc_strand_buf);
-  GPU_vertbuf_use(cache->proc_strand_seg_buf);
-  cache->strand_seg_tex = GPU_texture_create_from_vertbuf("curves_strand_seg",
-                                                          cache->proc_strand_seg_buf);
+  GPU_vertbuf_use(cache.proc_strand_buf);
+  cache.strand_tex = GPU_texture_create_from_vertbuf("curves_strand", cache.proc_strand_buf);
+  GPU_vertbuf_use(cache.proc_strand_seg_buf);
+  cache.strand_seg_tex = GPU_texture_create_from_vertbuf("curves_strand_seg",
+                                                         cache.proc_strand_seg_buf);
 }

-static void curves_batch_cache_ensure_procedural_final_points(ParticleHairCache *cache, int subdiv)
+static void curves_batch_cache_ensure_procedural_final_points(ParticleHairCache &cache, int subdiv)
 {
   /* Same format as point_tex. */
   GPUVertFormat format = {0};
   GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 4, GPU_FETCH_FLOAT);

-  cache->final[subdiv].proc_buf = GPU_vertbuf_create_with_format_ex(&format,
-                                                                    GPU_USAGE_DEVICE_ONLY);
+  cache.final[subdiv].proc_buf = GPU_vertbuf_create_with_format_ex(&format, GPU_USAGE_DEVICE_ONLY);

   /* Create a destination buffer for the transform feedback. Sized appropriately */
   /* Those are points! not line segments. */
-  GPU_vertbuf_data_alloc(cache->final[subdiv].proc_buf,
-                         cache->final[subdiv].strands_res * cache->strands_len);
+  GPU_vertbuf_data_alloc(cache.final[subdiv].proc_buf,
+                         cache.final[subdiv].strands_res * cache.strands_len);

   /* Create vbo immediately to bind to texture buffer. */
-  GPU_vertbuf_use(cache->final[subdiv].proc_buf);
-  cache->final[subdiv].proc_tex = GPU_texture_create_from_vertbuf("hair_proc",
-                                                                  cache->final[subdiv].proc_buf);
+  GPU_vertbuf_use(cache.final[subdiv].proc_buf);
+  cache.final[subdiv].proc_tex = GPU_texture_create_from_vertbuf("hair_proc",
+                                                                 cache.final[subdiv].proc_buf);
 }

-static void curves_batch_cache_fill_segments_indices(Curves *curves,
+static void curves_batch_cache_fill_segments_indices(const Curves &curves,
                                                       const int res,
-                                                      GPUIndexBufBuilder *elb)
+                                                      GPUIndexBufBuilder &elb)
 {
-  const int curve_size = curves->geometry.curve_size;
+  const int curves_num = curves.geometry.curve_size;

   uint curr_point = 0;

-  for ([[maybe_unused]] const int i : IndexRange(curve_size)) {
+  for ([[maybe_unused]] const int i : IndexRange(curves_num)) {
     for (int k = 0; k < res; k++) {
-      GPU_indexbuf_add_generic_vert(elb, curr_point++);
+      GPU_indexbuf_add_generic_vert(&elb, curr_point++);
     }
-    GPU_indexbuf_add_primitive_restart(elb);
+    GPU_indexbuf_add_primitive_restart(&elb);
   }
 }

-static void curves_batch_cache_ensure_procedural_indices(Curves *curves,
-                                                          ParticleHairCache *cache,
-                                                          int thickness_res,
-                                                          int subdiv)
+static void curves_batch_cache_ensure_procedural_indices(Curves &curves,
+                                                          ParticleHairCache &cache,
+                                                          const int thickness_res,
+                                                          const int subdiv)
 {
   BLI_assert(thickness_res <= MAX_THICKRES); /* Cylinder strip not currently supported. */

-  if (cache->final[subdiv].proc_hairs[thickness_res - 1] != nullptr) {
+  if (cache.final[subdiv].proc_hairs[thickness_res - 1] != nullptr) {
     return;
   }

-  int verts_per_hair = cache->final[subdiv].strands_res * thickness_res;
+  int verts_per_curve = cache.final[subdiv].strands_res * thickness_res;
   /* +1 for primitive restart */
-  int element_count = (verts_per_hair + 1) * cache->strands_len;
+  int element_count = (verts_per_curve + 1) * cache.strands_len;
   GPUPrimType prim_type = (thickness_res == 1) ? GPU_PRIM_LINE_STRIP : GPU_PRIM_TRI_STRIP;

   static GPUVertFormat format = {0};
@@ -322,46 +317,46 @@ static void curves_batch_cache_ensure_procedural_indices(Curves *curves,
   GPUIndexBufBuilder elb;
   GPU_indexbuf_init_ex(&elb, prim_type, element_count, element_count);

-  curves_batch_cache_fill_segments_indices(curves, verts_per_hair, &elb);
+  curves_batch_cache_fill_segments_indices(curves, verts_per_curve, elb);

-  cache->final[subdiv].proc_hairs[thickness_res - 1] = GPU_batch_create_ex(
+  cache.final[subdiv].proc_hairs[thickness_res - 1] = GPU_batch_create_ex(
       prim_type, vbo, GPU_indexbuf_build(&elb), GPU_BATCH_OWNS_VBO | GPU_BATCH_OWNS_INDEX);
 }

-bool hair_ensure_procedural_data(Object *object,
-                                 ParticleHairCache **r_hair_cache,
-                                 GPUMaterial *gpu_material,
-                                 int subdiv,
-                                 int thickness_res)
+bool curves_ensure_procedural_data(Object *object,
+                                   ParticleHairCache **r_hair_cache,
+                                   GPUMaterial *gpu_material,
+                                   const int subdiv,
+                                   const int thickness_res)
 {
   bool need_ft_update = false;
-  Curves *curves = static_cast<Curves *>(object->data);
+  Curves &curves = *static_cast<Curves *>(object->data);

-  HairBatchCache *cache = curves_batch_cache_get(curves);
-  *r_hair_cache = &cache->hair;
+  CurvesBatchCache &cache = curves_batch_cache_get(curves);
+  *r_hair_cache = &cache.hair;

   const int steps = 3; /* TODO: don't hard-code? */
   (*r_hair_cache)->final[subdiv].strands_res = 1 << (steps + subdiv);

   /* Refreshed on combing and simulation. */
   if ((*r_hair_cache)->proc_point_buf == nullptr) {
-    ensure_seg_pt_count(curves, &cache->hair);
-    curves_batch_cache_ensure_procedural_pos(curves, &cache->hair, gpu_material);
+    ensure_seg_pt_count(curves, cache.hair);
+    curves_batch_cache_ensure_procedural_pos(curves, cache.hair, gpu_material);
     need_ft_update = true;
   }

   /* Refreshed if active layer or custom data changes. */
   if ((*r_hair_cache)->strand_tex == nullptr) {
-    curves_batch_cache_ensure_procedural_strand_data(curves, &cache->hair);
+    curves_batch_cache_ensure_procedural_strand_data(curves, cache.hair);
   }

   /* Refreshed only on subdiv count change. */
   if ((*r_hair_cache)->final[subdiv].proc_buf == nullptr) {
-    curves_batch_cache_ensure_procedural_final_points(&cache->hair, subdiv);
+    curves_batch_cache_ensure_procedural_final_points(cache.hair, subdiv);
     need_ft_update = true;
   }

   if ((*r_hair_cache)->final[subdiv].proc_hairs[thickness_res - 1] == nullptr) {
-    curves_batch_cache_ensure_procedural_indices(curves, &cache->hair, thickness_res, subdiv);
+    curves_batch_cache_ensure_procedural_indices(curves, cache.hair, thickness_res, subdiv);
   }

   return need_ft_update;

@@ -172,8 +172,8 @@ static ParticleHairCache *drw_hair_particle_cache_get(Object *object,
         object, psys, md, &cache, gpu_material, subdiv, thickness_res);
   }
   else {
-    /* New hair object. */
-    update = hair_ensure_procedural_data(object, &cache, gpu_material, subdiv, thickness_res);
+    /* New curves object. */
+    update = curves_ensure_procedural_data(object, &cache, gpu_material, subdiv, thickness_res);
   }

   if (update) {
@@ -230,7 +230,7 @@ void DRW_hair_duplimat_get(Object *object,
     }
   }
   else {
-    /* New hair object. */
+    /* New curves object. */
     copy_m4_m4(dupli_mat, object->obmat);
   }
 }
@@ -291,7 +291,7 @@ DRWShadingGroup *DRW_shgroup_hair_create_sub(Object *object,
     hair_close_tip = (part->shape_flag & PART_SHAPE_CLOSE_TIP) != 0;
   }
   else {
-    /* TODO: implement for new hair object. */
+    /* TODO: implement for new curves object. */
     hair_rad_shape = 1.0f;
     hair_rad_root = 0.005f;
     hair_rad_tip = 0.0f;

@@ -91,11 +91,11 @@ bool particles_ensure_procedural_data(struct Object *object,
 /**
  * Ensure all textures and buffers needed for GPU accelerated drawing.
  */
-bool hair_ensure_procedural_data(struct Object *object,
-                                 struct ParticleHairCache **r_hair_cache,
-                                 struct GPUMaterial *gpu_material,
-                                 int subdiv,
-                                 int thickness_res);
+bool curves_ensure_procedural_data(struct Object *object,
+                                   struct ParticleHairCache **r_hair_cache,
+                                   struct GPUMaterial *gpu_material,
+                                   int subdiv,
+                                   int thickness_res);

 #ifdef __cplusplus
 }