Revert "Fix #104850: Create Geometry Nodes operators fails if not in English"

This reverts commit 68181c2560db25c1bd2b70beccf6022c5aa00f39.

I merged 3.6 into 3.5 by mistake. I had a PR against main, then changed it
at the last minute to be against 3.5 via the web interface, unaware that I
shouldn't do that without updating the patch.

Original Pull Request: #104889
Commit by Dalai Felinto, 2023-02-17 18:45:42 +01:00
parent 68181c2560, commit 4ec9aff2af
89 changed files with 862 additions and 1822 deletions

.arcconfig (new file, +8 lines)

@ -0,0 +1,8 @@
{
"project_id" : "Blender",
"conduit_uri" : "https://developer.blender.org/",
"phabricator.uri" : "https://developer.blender.org/",
"git.default-relative-commit" : "origin/master",
"arc.land.update.default" : "rebase",
"arc.land.onto.default" : "master"
}

@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-or-later
## Update and uncomment this in the release branch
# set(BLENDER_VERSION 3.1)
set(BLENDER_VERSION 3.5)
function(download_source dep)
set(TARGET_FILE ${${dep}_FILE})

@ -10,7 +10,7 @@ ExternalProject_Add(external_epoxy
URL_HASH ${EPOXY_HASH_TYPE}=${EPOXY_HASH}
PREFIX ${BUILD_DIR}/epoxy
PATCH_COMMAND ${PATCH_CMD} -p 1 -N -d ${BUILD_DIR}/epoxy/src/external_epoxy/ < ${PATCH_DIR}/epoxy.diff
CONFIGURE_COMMAND ${CONFIGURE_ENV} && ${MESON} setup --prefix ${LIBDIR}/epoxy --default-library ${EPOXY_LIB_TYPE} --libdir lib ${BUILD_DIR}/epoxy/src/external_epoxy-build ${BUILD_DIR}/epoxy/src/external_epoxy -Dtests=false ${MESON_BUILD_TYPE}
CONFIGURE_COMMAND ${CONFIGURE_ENV} && ${MESON} setup --prefix ${LIBDIR}/epoxy --default-library ${EPOXY_LIB_TYPE} --libdir lib ${BUILD_DIR}/epoxy/src/external_epoxy-build ${BUILD_DIR}/epoxy/src/external_epoxy -Dtests=false
BUILD_COMMAND ninja
INSTALL_COMMAND ninja install
)

@ -9,7 +9,7 @@ ExternalProject_Add(external_fribidi
URL_HASH ${FRIBIDI_HASH_TYPE}=${FRIBIDI_HASH}
DOWNLOAD_DIR ${DOWNLOAD_DIR}
PREFIX ${BUILD_DIR}/fribidi
CONFIGURE_COMMAND ${MESON} setup --prefix ${LIBDIR}/fribidi ${MESON_BUILD_TYPE} -Ddocs=false --default-library static --libdir lib ${BUILD_DIR}/fribidi/src/external_fribidi-build ${BUILD_DIR}/fribidi/src/external_fribidi
CONFIGURE_COMMAND ${MESON} setup --prefix ${LIBDIR}/fribidi -Ddocs=false --default-library static --libdir lib ${BUILD_DIR}/fribidi/src/external_fribidi-build ${BUILD_DIR}/fribidi/src/external_fribidi
BUILD_COMMAND ninja
INSTALL_COMMAND ninja install
INSTALL_DIR ${LIBDIR}/fribidi

@ -21,7 +21,6 @@ set(HARFBUZZ_EXTRA_OPTIONS
# Only used for command line utilities,
# disable as this would add an addition & unnecessary build-dependency.
-Dcairo=disabled
${MESON_BUILD_TYPE}
)
ExternalProject_Add(external_harfbuzz
@ -60,10 +59,3 @@ if(BUILD_MODE STREQUAL Release AND WIN32)
DEPENDEES install
)
endif()
if(BUILD_MODE STREQUAL Debug AND WIN32)
ExternalProject_Add_Step(external_harfbuzz after_install
COMMAND ${CMAKE_COMMAND} -E copy ${LIBDIR}/harfbuzz/lib/libharfbuzz.a ${HARVEST_TARGET}/harfbuzz/lib/libharfbuzz_d.lib
DEPENDEES install
)
endif()

@ -15,7 +15,7 @@ llvm-config = '${LIBDIR}/llvm/bin/llvm-config'"
)
set(MESA_EXTRA_FLAGS
${MESON_BUILD_TYPE}
-Dbuildtype=release
-Dc_args=${MESA_CFLAGS}
-Dcpp_args=${MESA_CXXFLAGS}
-Dc_link_args=${MESA_LDFLAGS}

@ -16,10 +16,8 @@ message("BuildMode = ${BUILD_MODE}")
if(BUILD_MODE STREQUAL "Debug")
set(LIBDIR ${CMAKE_CURRENT_BINARY_DIR}/Debug)
set(MESON_BUILD_TYPE -Dbuildtype=debug)
else()
set(LIBDIR ${CMAKE_CURRENT_BINARY_DIR}/Release)
set(MESON_BUILD_TYPE -Dbuildtype=release)
endif()
set(DOWNLOAD_DIR "${CMAKE_CURRENT_BINARY_DIR}/downloads" CACHE STRING "Path for downloaded files")

@ -13,7 +13,7 @@ ExternalProject_Add(external_wayland
# NOTE: `-lm` is needed for `libxml2` which is a static library that uses `libm.so`,
# without this, math symbols such as `floor` aren't found.
CONFIGURE_COMMAND ${CMAKE_COMMAND} -E env PKG_CONFIG_PATH=${LIBDIR}/expat/lib/pkgconfig:${LIBDIR}/xml2/lib/pkgconfig:${LIBDIR}/ffi/lib/pkgconfig:$PKG_CONFIG_PATH
${MESON} --prefix ${LIBDIR}/wayland ${MESON_BUILD_TYPE} -Ddocumentation=false -Dtests=false -D "c_link_args=-L${LIBDIR}/ffi/lib -lm" . ../external_wayland
${MESON} --prefix ${LIBDIR}/wayland -Ddocumentation=false -Dtests=false -D "c_link_args=-L${LIBDIR}/ffi/lib -lm" . ../external_wayland
BUILD_COMMAND ninja
INSTALL_COMMAND ninja install
)

@ -7,7 +7,7 @@ ExternalProject_Add(external_wayland_protocols
PREFIX ${BUILD_DIR}/wayland-protocols
# Use `-E` so the `PKG_CONFIG_PATH` can be defined to link against our own WAYLAND.
CONFIGURE_COMMAND ${CMAKE_COMMAND} -E env PKG_CONFIG_PATH=${LIBDIR}/wayland/lib64/pkgconfig:$PKG_CONFIG_PATH
${MESON} --prefix ${LIBDIR}/wayland-protocols ${MESON_BUILD_TYPE} . ../external_wayland_protocols -Dtests=false
${MESON} --prefix ${LIBDIR}/wayland-protocols . ../external_wayland_protocols -Dtests=false
BUILD_COMMAND ninja
INSTALL_COMMAND ninja install
)

@ -85,7 +85,7 @@ if(NOT APPLE)
set(WITH_CYCLES_DEVICE_OPTIX ON CACHE BOOL "" FORCE)
set(WITH_CYCLES_CUDA_BINARIES ON CACHE BOOL "" FORCE)
set(WITH_CYCLES_CUBIN_COMPILER OFF CACHE BOOL "" FORCE)
set(WITH_CYCLES_HIP_BINARIES OFF CACHE BOOL "" FORCE)
set(WITH_CYCLES_HIP_BINARIES ON CACHE BOOL "" FORCE)
set(WITH_CYCLES_DEVICE_ONEAPI ON CACHE BOOL "" FORCE)
set(WITH_CYCLES_ONEAPI_BINARIES ON CACHE BOOL "" FORCE)
endif()

@ -5,38 +5,38 @@
update-code:
git:
submodules:
- branch: main
- branch: blender-v3.5-release
commit_id: HEAD
path: release/scripts/addons
- branch: main
- branch: blender-v3.5-release
commit_id: HEAD
path: release/scripts/addons_contrib
- branch: main
- branch: blender-v3.5-release
commit_id: HEAD
path: release/datafiles/locale
- branch: main
- branch: blender-v3.5-release
commit_id: HEAD
path: source/tools
svn:
libraries:
darwin-arm64:
branch: trunk
branch: tags/blender-3.5-release
commit_id: HEAD
path: lib/darwin_arm64
darwin-x86_64:
branch: trunk
branch: tags/blender-3.5-release
commit_id: HEAD
path: lib/darwin
linux-x86_64:
branch: trunk
branch: tags/blender-3.5-release
commit_id: HEAD
path: lib/linux_x86_64_glibc_228
windows-amd64:
branch: trunk
branch: tags/blender-3.5-release
commit_id: HEAD
path: lib/win64_vc15
tests:
branch: trunk
branch: tags/blender-3.5-release
commit_id: HEAD
path: lib/tests
benchmarks:

@ -38,7 +38,7 @@ PROJECT_NAME = Blender
# could be handy for archiving the generated documentation or if some version
# control system is used.
PROJECT_NUMBER = V3.6
PROJECT_NUMBER = V3.5
# Using the PROJECT_BRIEF tag one can provide an optional one line description
# for a project that appears at the top of each page and should give viewer a

@ -1676,20 +1676,17 @@ class CyclesPreferences(bpy.types.AddonPreferences):
col.label(text="and NVIDIA driver version %s or newer" % driver_version,
icon='BLANK1', translate=False)
elif device_type == 'HIP':
if True:
col.label(text="HIP temporarily disabled due to compiler bugs", icon='BLANK1')
else:
import sys
if sys.platform[:3] == "win":
driver_version = "21.Q4"
col.label(text="Requires AMD GPU with Vega or RDNA architecture", icon='BLANK1')
col.label(text=iface_("and AMD Radeon Pro %s driver or newer") % driver_version,
icon='BLANK1', translate=False)
elif sys.platform.startswith("linux"):
driver_version = "22.10"
col.label(text="Requires AMD GPU with Vega or RDNA architecture", icon='BLANK1')
col.label(text=iface_("and AMD driver version %s or newer") % driver_version, icon='BLANK1',
translate=False)
import sys
if sys.platform[:3] == "win":
driver_version = "21.Q4"
col.label(text="Requires AMD GPU with Vega or RDNA architecture", icon='BLANK1')
col.label(text=iface_("and AMD Radeon Pro %s driver or newer") % driver_version,
icon='BLANK1', translate=False)
elif sys.platform.startswith("linux"):
driver_version = "22.10"
col.label(text="Requires AMD GPU with Vega or RDNA architecture", icon='BLANK1')
col.label(text=iface_("and AMD driver version %s or newer") % driver_version, icon='BLANK1',
translate=False)
elif device_type == 'ONEAPI':
import sys
if sys.platform.startswith("win"):

@ -42,15 +42,12 @@ endif()
###########################################################################
if(WITH_CYCLES_HIP_BINARIES AND WITH_CYCLES_DEVICE_HIP)
set(WITH_CYCLES_HIP_BINARIES OFF)
message(STATUS "HIP temporarily disabled due to compiler bugs")
find_package(HIP)
set_and_warn_library_found("HIP compiler" HIP_FOUND WITH_CYCLES_HIP_BINARIES)
# find_package(HIP)
# set_and_warn_library_found("HIP compiler" HIP_FOUND WITH_CYCLES_HIP_BINARIES)
# if(HIP_FOUND)
# message(STATUS "Found HIP ${HIP_HIPCC_EXECUTABLE} (${HIP_VERSION})")
# endif()
if(HIP_FOUND)
message(STATUS "Found HIP ${HIP_HIPCC_EXECUTABLE} (${HIP_VERSION})")
endif()
endif()
if(NOT WITH_HIP_DYNLOAD)

@ -306,23 +306,14 @@ static void gwl_window_frame_update_from_pending(GWL_Window *win);
#ifdef USE_EVENT_BACKGROUND_THREAD
enum eGWL_PendingWindowActions {
/**
* The state of the window frame has changed, apply the state from #GWL_Window::frame_pending.
*/
PENDING_WINDOW_FRAME_CONFIGURE = 0,
/** The EGL buffer must be resized to match #GWL_WindowFrame::size. */
PENDING_EGL_WINDOW_RESIZE,
PENDING_FRAME_CONFIGURE = 0,
PENDING_EGL_RESIZE,
# ifdef GHOST_OPENGL_ALPHA
/** Draw an opaque region behind the window. */
PENDING_OPAQUE_SET,
# endif
/**
* The DPI for a monitor has changed or the monitors (outputs)
* this window is visible on may have changed. Recalculate the windows scale.
*/
PENDING_OUTPUT_SCALE_UPDATE,
PENDING_SCALE_UPDATE,
};
# define PENDING_NUM (PENDING_OUTPUT_SCALE_UPDATE + 1)
# define PENDING_NUM (PENDING_SCALE_UPDATE + 1)
static void gwl_window_pending_actions_tag(GWL_Window *win, enum eGWL_PendingWindowActions type)
{
@ -332,10 +323,10 @@ static void gwl_window_pending_actions_tag(GWL_Window *win, enum eGWL_PendingWin
static void gwl_window_pending_actions_handle(GWL_Window *win)
{
if (win->pending_actions[PENDING_WINDOW_FRAME_CONFIGURE].exchange(false)) {
if (win->pending_actions[PENDING_FRAME_CONFIGURE].exchange(false)) {
gwl_window_frame_update_from_pending(win);
}
if (win->pending_actions[PENDING_EGL_WINDOW_RESIZE].exchange(false)) {
if (win->pending_actions[PENDING_EGL_RESIZE].exchange(false)) {
wl_egl_window_resize(win->egl_window, UNPACK2(win->frame.size), 0, 0);
}
# ifdef GHOST_OPENGL_ALPHA
@ -343,7 +334,7 @@ static void gwl_window_pending_actions_handle(GWL_Window *win)
win->ghost_window->setOpaque();
}
# endif
if (win->pending_actions[PENDING_OUTPUT_SCALE_UPDATE].exchange(false)) {
if (win->pending_actions[PENDING_SCALE_UPDATE].exchange(false)) {
win->ghost_window->outputs_changed_update_scale();
}
}
@ -351,10 +342,9 @@ static void gwl_window_pending_actions_handle(GWL_Window *win)
#endif /* USE_EVENT_BACKGROUND_THREAD */
/**
* Update the window's #GWL_WindowFrame.
* The caller must handle locking & run from the main thread.
* Update the window's #GWL_WindowFrame
*/
static void gwl_window_frame_update_from_pending_no_lock(GWL_Window *win)
static void gwl_window_frame_update_from_pending_lockfree(GWL_Window *win)
{
#ifdef USE_EVENT_BACKGROUND_THREAD
GHOST_ASSERT(win->ghost_system->main_thread_id == std::this_thread::get_id(),
@ -391,7 +381,7 @@ static void gwl_window_frame_update_from_pending(GWL_Window *win)
#ifdef USE_EVENT_BACKGROUND_THREAD
std::lock_guard lock_frame_guard{win->frame_pending_mutex};
#endif
gwl_window_frame_update_from_pending_no_lock(win);
gwl_window_frame_update_from_pending_lockfree(win);
}
/** \} */
@ -586,12 +576,12 @@ static void frame_handle_configure(struct libdecor_frame *frame,
GHOST_SystemWayland *system = win->ghost_system;
const bool is_main_thread = system->main_thread_id == std::this_thread::get_id();
if (!is_main_thread) {
gwl_window_pending_actions_tag(win, PENDING_WINDOW_FRAME_CONFIGURE);
gwl_window_pending_actions_tag(win, PENDING_FRAME_CONFIGURE);
}
else
# endif
{
gwl_window_frame_update_from_pending_no_lock(win);
gwl_window_frame_update_from_pending_lockfree(win);
}
}
}
@ -681,7 +671,7 @@ static void xdg_surface_handle_configure(void *data,
if (!is_main_thread) {
/* NOTE(@ideasman42): this only gets one redraw,
* I could not find a case where this causes problems. */
gwl_window_pending_actions_tag(win, PENDING_WINDOW_FRAME_CONFIGURE);
gwl_window_pending_actions_tag(win, PENDING_FRAME_CONFIGURE);
}
else
#endif
@ -1383,7 +1373,7 @@ bool GHOST_WindowWayland::outputs_changed_update_scale()
{
#ifdef USE_EVENT_BACKGROUND_THREAD
if (system_->main_thread_id != std::this_thread::get_id()) {
gwl_window_pending_actions_tag(window_, PENDING_OUTPUT_SCALE_UPDATE);
gwl_window_pending_actions_tag(window_, PENDING_SCALE_UPDATE);
return false;
}
#endif
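
The hunks above rename the window's pending-action flags and the no-lock update helper, but the underlying mechanism is unchanged: when an event arrives off the main thread, the window only tags an atomic flag, and the main thread later consumes each flag with `exchange(false)` and does the real work. A minimal standalone sketch of that tag/handle pattern (plain `std::atomic` flags and a stub `Window`, not Blender's actual GWL_Window/GHOST types):

```cpp
#include <atomic>
#include <cstdio>
#include <thread>

/* Hypothetical stand-ins for the per-window pending-action flags. */
enum ePendingAction { PENDING_FRAME_CONFIGURE = 0, PENDING_SCALE_UPDATE, PENDING_NUM };

struct Window {
  std::atomic<bool> pending[PENDING_NUM];
  Window()
  {
    for (std::atomic<bool> &flag : pending) {
      flag.store(false);
    }
  }
};

/* Event thread: only record that work is needed. */
static void pending_tag(Window &win, const ePendingAction action)
{
  win.pending[action].store(true);
}

/* Main thread: consume each flag exactly once and do the real work. */
static void pending_handle(Window &win)
{
  if (win.pending[PENDING_FRAME_CONFIGURE].exchange(false)) {
    std::puts("apply the pending frame configuration");
  }
  if (win.pending[PENDING_SCALE_UPDATE].exchange(false)) {
    std::puts("recompute the window scale from its outputs");
  }
}

int main()
{
  Window win;
  std::thread event_thread([&win]() { pending_tag(win, PENDING_SCALE_UPDATE); });
  event_thread.join();
  pending_handle(win); /* Prints only the scale-update message. */
}
```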

@ -93,7 +93,7 @@ GHOST_WindowWin32::GHOST_WindowWin32(GHOST_SystemWin32 *system,
}
RECT win_rect = {left, top, long(left + width), long(top + height)};
adjustWindowRectForDesktop(&win_rect, style, extended_style);
adjustWindowRectForClosestMonitor(&win_rect, style, extended_style);
wchar_t *title_16 = alloc_utf16_from_8((char *)title, 0);
m_hWnd = ::CreateWindowExW(extended_style, /* window extended style */
@ -298,52 +298,24 @@ GHOST_WindowWin32::~GHOST_WindowWin32()
m_directManipulationHelper = NULL;
}
void GHOST_WindowWin32::adjustWindowRectForDesktop(LPRECT win_rect, DWORD dwStyle, DWORD dwExStyle)
void GHOST_WindowWin32::adjustWindowRectForClosestMonitor(LPRECT win_rect,
DWORD dwStyle,
DWORD dwExStyle)
{
/* Windows can span multiple monitors, but must be usable. The desktop can have a larger
* surface than all monitors combined, for example when two monitors are aligned diagonally.
* Therefore we ensure that all the window's corners are within some monitor's Work area. */
POINT pt;
HMONITOR hmonitor;
/* Get Details of the closest monitor. */
HMONITOR hmonitor = MonitorFromRect(win_rect, MONITOR_DEFAULTTONEAREST);
MONITORINFOEX monitor;
monitor.cbSize = sizeof(MONITORINFOEX);
monitor.dwFlags = 0;
/* Note that with MonitorFromPoint using MONITOR_DEFAULTTONEAREST, it will return
* the exact monitor if there is one at the location or the nearest monitor if not. */
/* Top-left. */
pt.x = win_rect->left;
pt.y = win_rect->top;
hmonitor = MonitorFromPoint(pt, MONITOR_DEFAULTTONEAREST);
GetMonitorInfo(hmonitor, &monitor);
win_rect->top = max(win_rect->top, monitor.rcWork.top);
win_rect->left = max(win_rect->left, monitor.rcWork.left);
/* Top-right. */
pt.x = win_rect->right;
pt.y = win_rect->top;
hmonitor = MonitorFromPoint(pt, MONITOR_DEFAULTTONEAREST);
GetMonitorInfo(hmonitor, &monitor);
win_rect->top = max(win_rect->top, monitor.rcWork.top);
win_rect->right = min(win_rect->right, monitor.rcWork.right);
/* Bottom-left. */
pt.x = win_rect->left;
pt.y = win_rect->bottom;
hmonitor = MonitorFromPoint(pt, MONITOR_DEFAULTTONEAREST);
GetMonitorInfo(hmonitor, &monitor);
win_rect->bottom = min(win_rect->bottom, monitor.rcWork.bottom);
win_rect->left = max(win_rect->left, monitor.rcWork.left);
/* Bottom-right. */
pt.x = win_rect->right;
pt.y = win_rect->bottom;
hmonitor = MonitorFromPoint(pt, MONITOR_DEFAULTTONEAREST);
GetMonitorInfo(hmonitor, &monitor);
win_rect->bottom = min(win_rect->bottom, monitor.rcWork.bottom);
win_rect->right = min(win_rect->right, monitor.rcWork.right);
/* Constrain requested size and position to fit within this monitor. */
LONG width = min(monitor.rcWork.right - monitor.rcWork.left, win_rect->right - win_rect->left);
LONG height = min(monitor.rcWork.bottom - monitor.rcWork.top, win_rect->bottom - win_rect->top);
win_rect->left = min(max(monitor.rcWork.left, win_rect->left), monitor.rcWork.right - width);
win_rect->right = win_rect->left + width;
win_rect->top = min(max(monitor.rcWork.top, win_rect->top), monitor.rcWork.bottom - height);
win_rect->bottom = win_rect->top + height;
/* With Windows 10 and newer we can adjust for chrome that differs with DPI and scale. */
GHOST_WIN32_AdjustWindowRectExForDpi fpAdjustWindowRectExForDpi = nullptr;
@ -362,6 +334,9 @@ void GHOST_WindowWin32::adjustWindowRectForDesktop(LPRECT win_rect, DWORD dwStyl
else {
AdjustWindowRectEx(win_rect, dwStyle & ~WS_OVERLAPPED, FALSE, dwExStyle);
}
/* But never allow a top position that can hide part of the title bar. */
win_rect->top = max(monitor.rcWork.top, win_rect->top);
}
bool GHOST_WindowWin32::getValid() const

@ -87,12 +87,12 @@ class GHOST_WindowWin32 : public GHOST_Window {
~GHOST_WindowWin32();
/**
* Adjusts a requested window rect to fit and position within the desktop.
* Adjusts a requested window rect to fit and position correctly in monitor.
* \param win_rect: pointer to rectangle that will be modified.
* \param dwStyle: The Window Style of the window whose required size is to be calculated.
* \param dwExStyle: The Extended Window Style of the window.
*/
void adjustWindowRectForDesktop(LPRECT win_rect, DWORD dwStyle, DWORD dwExStyle);
void adjustWindowRectForClosestMonitor(LPRECT win_rect, DWORD dwStyle, DWORD dwExStyle);
/**
* Returns indication as to whether the window is valid.
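
The replacement `adjustWindowRectForClosestMonitor()` above boils down to two steps: cap the requested size to the monitor's work area, then clamp the origin so the whole rectangle stays inside it. The same arithmetic as a standalone sketch (a hypothetical `Rect` struct, not the Win32 `RECT`/`MONITORINFOEX` types):

```cpp
#include <algorithm>
#include <cstdio>

struct Rect {
  long left, top, right, bottom;
};

/* Constrain `win` to fit inside `work`: the size is capped to the work area,
 * then the position is clamped so the whole rect stays inside it. */
static Rect constrain_to_work_area(Rect win, const Rect &work)
{
  const long width = std::min(work.right - work.left, win.right - win.left);
  const long height = std::min(work.bottom - work.top, win.bottom - win.top);
  win.left = std::min(std::max(work.left, win.left), work.right - width);
  win.right = win.left + width;
  win.top = std::min(std::max(work.top, win.top), work.bottom - height);
  win.bottom = win.top + height;
  return win;
}

int main()
{
  const Rect work = {0, 0, 1920, 1040}; /* Monitor work area (excludes the taskbar). */
  const Rect r = constrain_to_work_area({1800, -50, 2600, 550}, work);
  std::printf("%ld %ld %ld %ld\n", r.left, r.top, r.right, r.bottom); /* 1120 0 1920 600 */
}
```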

Binary file not shown (changed: 823 KiB before, 678 KiB after).

@ -5631,8 +5631,6 @@ def km_curves(params):
("curves.select_linked", {"type": 'L', "value": 'PRESS', "ctrl": True}, None),
("curves.delete", {"type": 'X', "value": 'PRESS'}, None),
("curves.delete", {"type": 'DEL', "value": 'PRESS'}, None),
("curves.select_more", {"type": 'NUMPAD_PLUS', "value": 'PRESS', "ctrl": True, "repeat": True}, None),
("curves.select_less", {"type": 'NUMPAD_MINUS', "value": 'PRESS', "ctrl": True, "repeat": True}, None),
])
return keymap

@ -25,7 +25,7 @@ def build_default_empty_geometry_node_group(name):
def geometry_node_group_empty_new():
group = build_default_empty_geometry_node_group(data_("Geometry Nodes"))
group.links.new(group.nodes[data_("Group Input")].outputs[0], group.nodes[data_("Group Output")].inputs[0])
group.links.new(group.nodes["Group Input"].outputs[0], group.nodes["Group Output"].inputs[0])
return group
@ -121,8 +121,8 @@ class MoveModifierToNodes(Operator):
group_node.node_tree = old_group
group_node.update()
group_input_node = group.nodes[data_("Group Input")]
group_output_node = group.nodes[data_("Group Output")]
group_input_node = group.nodes["Group Input"]
group_output_node = group.nodes["Group Output"]
# Copy default values for inputs and create named attribute input nodes.
input_nodes = []
@ -173,7 +173,6 @@ class MoveModifierToNodes(Operator):
first_geometry_output = group_node_output
# Adjust locations of store named attribute nodes and move group output.
# Note that the node group has its sockets names translated, while the built-in nodes don't.
if store_nodes:
for i, node in enumerate(store_nodes):
node.location.x = (i + 1) * 175
@ -183,10 +182,9 @@ class MoveModifierToNodes(Operator):
group.links.new(first_geometry_output, store_nodes[0].inputs["Geometry"])
for i in range(len(store_nodes) - 1):
group.links.new(store_nodes[i].outputs["Geometry"], store_nodes[i + 1].inputs["Geometry"])
group.links.new(store_nodes[-1].outputs["Geometry"], group_output_node.inputs[data_("Geometry")])
group.links.new(store_nodes[-1].outputs["Geometry"], group_output_node.inputs["Geometry"])
else:
group.links.new(first_geometry_output, group_output_node.inputs[data_("Geometry")])
group.links.new(first_geometry_output, group_output_node.inputs["Geometry"])
modifier.node_group = group

@ -604,7 +604,7 @@ class DATA_PT_mesh_attributes(MeshButtonsPanel, Panel):
colliding_names = []
for collection in (
# Built-in names.
{"shade_smooth": None, "crease": None},
{"shade_smooth": None, "normal": None, "crease": None},
mesh.attributes,
None if ob is None else ob.vertex_groups,
):

@ -2044,16 +2044,6 @@ class VIEW3D_MT_select_paint_mask_vertex(Menu):
layout.operator("paint.vert_select_linked", text="Select Linked")
class VIEW3D_MT_edit_curves_select_more_less(Menu):
bl_label = "Select More/Less"
def draw(self, _context):
layout = self.layout
layout.operator("curves.select_more", text="More")
layout.operator("curves.select_less", text="Less")
class VIEW3D_MT_select_edit_curves(Menu):
bl_label = "Select"
@ -2063,17 +2053,10 @@ class VIEW3D_MT_select_edit_curves(Menu):
layout.operator("curves.select_all", text="All").action = 'SELECT'
layout.operator("curves.select_all", text="None").action = 'DESELECT'
layout.operator("curves.select_all", text="Invert").action = 'INVERT'
layout.separator()
layout.operator("curves.select_random", text="Random")
layout.operator("curves.select_end", text="Endpoints")
layout.operator("curves.select_linked", text="Linked")
layout.separator()
layout.menu("VIEW3D_MT_edit_curves_select_more_less")
class VIEW3D_MT_select_sculpt_curves(Menu):
bl_label = "Select"
@ -8063,7 +8046,6 @@ classes = (
VIEW3D_MT_select_gpencil,
VIEW3D_MT_select_paint_mask,
VIEW3D_MT_select_paint_mask_vertex,
VIEW3D_MT_edit_curves_select_more_less,
VIEW3D_MT_select_edit_curves,
VIEW3D_MT_select_sculpt_curves,
VIEW3D_MT_mesh_add,

@ -41,7 +41,6 @@ typedef enum eAttrDomainMask {
ATTR_DOMAIN_MASK_CURVE = (1 << 4),
ATTR_DOMAIN_MASK_ALL = (1 << 5) - 1
} eAttrDomainMask;
ENUM_OPERATORS(eAttrDomainMask, ATTR_DOMAIN_MASK_ALL);
#define ATTR_DOMAIN_AS_MASK(domain) ((eAttrDomainMask)((1 << (int)(domain))))

@ -17,15 +17,17 @@ extern "C" {
*/
/* Blender major and minor version. */
#define BLENDER_VERSION 306
#define BLENDER_VERSION 305
/* Blender patch version for bugfix releases. */
#define BLENDER_VERSION_PATCH 0
/** Blender release cycle stage: alpha/beta/rc/release. */
#define BLENDER_VERSION_CYCLE alpha
#define BLENDER_VERSION_CYCLE beta
/* TODO proper version bump. */
/* Blender file format version. */
#define BLENDER_FILE_VERSION BLENDER_VERSION
#define BLENDER_FILE_SUBVERSION 0
#define BLENDER_FILE_SUBVERSION 10
/* Minimum Blender version that supports reading file written with the current
* version. Older Blender versions will test this and show a warning if the file

@ -108,7 +108,7 @@ BVHTree *bvhtree_from_editmesh_verts(
*/
BVHTree *bvhtree_from_editmesh_verts_ex(BVHTreeFromEditMesh *data,
struct BMEditMesh *em,
blender::BitSpan mask,
const blender::BitVector<> &mask,
int verts_num_active,
float epsilon,
int tree_type,
@ -124,7 +124,7 @@ BVHTree *bvhtree_from_editmesh_verts_ex(BVHTreeFromEditMesh *data,
BVHTree *bvhtree_from_mesh_verts_ex(struct BVHTreeFromMesh *data,
const float (*vert_positions)[3],
int verts_num,
blender::BitSpan verts_mask,
const blender::BitVector<> &verts_mask,
int verts_num_active,
float epsilon,
int tree_type,
@ -138,7 +138,7 @@ BVHTree *bvhtree_from_editmesh_edges(
*/
BVHTree *bvhtree_from_editmesh_edges_ex(BVHTreeFromEditMesh *data,
struct BMEditMesh *em,
blender::BitSpan edges_mask,
const blender::BitVector<> &edges_mask,
int edges_num_active,
float epsilon,
int tree_type,
@ -156,7 +156,7 @@ BVHTree *bvhtree_from_mesh_edges_ex(struct BVHTreeFromMesh *data,
const float (*vert_positions)[3],
const struct MEdge *edge,
int edges_num,
blender::BitSpan edges_mask,
const blender::BitVector<> &edges_mask,
int edges_num_active,
float epsilon,
int tree_type,
@ -170,7 +170,7 @@ BVHTree *bvhtree_from_editmesh_looptri(
*/
BVHTree *bvhtree_from_editmesh_looptri_ex(BVHTreeFromEditMesh *data,
struct BMEditMesh *em,
blender::BitSpan mask,
const blender::BitVector<> &mask,
int looptri_num_active,
float epsilon,
int tree_type,
@ -184,7 +184,7 @@ BVHTree *bvhtree_from_mesh_looptri_ex(struct BVHTreeFromMesh *data,
const struct MLoop *mloop,
const struct MLoopTri *looptri,
int looptri_num,
blender::BitSpan mask,
const blender::BitVector<> &mask,
int looptri_num_active,
float epsilon,
int tree_type,

@ -282,7 +282,6 @@ bool CustomData_has_layer(const struct CustomData *data, int type);
* Returns the number of layers with this type.
*/
int CustomData_number_of_layers(const struct CustomData *data, int type);
int CustomData_number_of_anonymous_layers(const struct CustomData *data, int type);
int CustomData_number_of_layers_typemask(const struct CustomData *data, eCustomDataMask mask);
/**

@ -41,10 +41,10 @@ struct PropertyRNA;
*/
void BKE_nlastrip_free(struct NlaStrip *strip, bool do_id_user);
/**
* Remove & Frees all NLA strips from the given NLA track,
* then frees (doesn't remove) the track itself.
* Remove the given NLA track from the set of NLA tracks, free the track's data,
* and the track itself.
*/
void BKE_nlatrack_free(struct NlaTrack *nlt, bool do_id_user);
void BKE_nlatrack_free(ListBase *tracks, struct NlaTrack *nlt, bool do_id_user);
/**
* Free the elements of type NLA Tracks provided in the given list, but do not free
* the list itself since that is not free-standing
@ -95,17 +95,6 @@ struct NlaTrack *BKE_nlatrack_add(struct AnimData *adt,
struct NlaTrack *prev,
bool is_liboverride);
/**
* Removes the given NLA track from the list of tracks provided.
*/
void BKE_nlatrack_remove(ListBase *tracks, struct NlaTrack *nlt);
/**
* Remove the given NLA track from the list of NLA tracks, free the track's data,
* and the track itself.
*/
void BKE_nlatrack_remove_and_free(ListBase *tracks, struct NlaTrack *nlt, bool do_id_user);
/**
* Create a NLA Strip referencing the given Action.
*/
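
The reverted API above folds unlinking and freeing back into `BKE_nlatrack_free(tracks, nlt, do_id_user)`, where passing a list both removes the track and frees it, while the newer split kept `BKE_nlatrack_remove()` (unlink only) separate from `BKE_nlatrack_remove_and_free()`. A standalone sketch of that split on a doubly linked list (hypothetical `Track`/`List` types, not Blender's ListBase API):

```cpp
#include <cassert>

struct Track {
  Track *prev = nullptr, *next = nullptr;
};
struct List {
  Track *first = nullptr, *last = nullptr;
};

/* Unlink only ("remove"): the caller still owns the node. */
static void track_remove(List &list, Track &t)
{
  if (t.prev) { t.prev->next = t.next; } else { list.first = t.next; }
  if (t.next) { t.next->prev = t.prev; } else { list.last = t.prev; }
  t.prev = t.next = nullptr;
}

/* Unlink and free ("remove_and_free"). */
static void track_remove_and_free(List &list, Track *t)
{
  track_remove(list, *t);
  delete t;
}

int main()
{
  List list;
  Track *a = new Track(), *b = new Track();
  list.first = a; list.last = b; a->next = b; b->prev = a;

  track_remove_and_free(list, b);
  assert(list.first == a && list.last == a && a->next == nullptr);

  track_remove_and_free(list, a);
  assert(list.first == nullptr && list.last == nullptr);
  return 0;
}
```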

@ -316,6 +316,9 @@ GVArray BuiltinCustomDataLayerProvider::try_get_for_read(const void *owner) cons
GAttributeWriter BuiltinCustomDataLayerProvider::try_get_for_write(void *owner) const
{
if (writable_ != Writable) {
return {};
}
CustomData *custom_data = custom_data_access_.get_custom_data(owner);
if (custom_data == nullptr) {
return {};

@ -37,6 +37,10 @@ class BuiltinAttributeProvider {
Creatable,
NonCreatable,
};
enum WritableEnum {
Writable,
Readonly,
};
enum DeletableEnum {
Deletable,
NonDeletable,
@ -47,6 +51,7 @@ class BuiltinAttributeProvider {
const eAttrDomain domain_;
const eCustomDataType data_type_;
const CreatableEnum createable_;
const WritableEnum writable_;
const DeletableEnum deletable_;
const AttributeValidator validator_;
@ -55,12 +60,14 @@ class BuiltinAttributeProvider {
const eAttrDomain domain,
const eCustomDataType data_type,
const CreatableEnum createable,
const WritableEnum writable,
const DeletableEnum deletable,
AttributeValidator validator = {})
: name_(std::move(name)),
domain_(domain),
data_type_(data_type),
createable_(createable),
writable_(writable),
deletable_(deletable),
validator_(validator)
{
@ -198,14 +205,20 @@ class BuiltinCustomDataLayerProvider final : public BuiltinAttributeProvider {
const eCustomDataType attribute_type,
const eCustomDataType stored_type,
const CreatableEnum creatable,
const WritableEnum writable,
const DeletableEnum deletable,
const CustomDataAccessInfo custom_data_access,
const AsReadAttribute as_read_attribute,
const AsWriteAttribute as_write_attribute,
const UpdateOnChange update_on_write,
const AttributeValidator validator = {})
: BuiltinAttributeProvider(
std::move(attribute_name), domain, attribute_type, creatable, deletable, validator),
: BuiltinAttributeProvider(std::move(attribute_name),
domain,
attribute_type,
creatable,
writable,
deletable,
validator),
stored_type_(stored_type),
custom_data_access_(custom_data_access),
as_read_attribute_(as_read_attribute),

@ -29,7 +29,6 @@
#include "MEM_guardedalloc.h"
using blender::BitSpan;
using blender::BitVector;
using blender::float3;
using blender::IndexRange;
@ -673,7 +672,7 @@ static BVHTree *bvhtree_from_editmesh_verts_create_tree(float epsilon,
int tree_type,
int axis,
BMEditMesh *em,
const BitSpan verts_mask,
const BitVector<> &verts_mask,
int verts_num_active)
{
BM_mesh_elem_table_ensure(em->bm, BM_VERT);
@ -707,7 +706,7 @@ static BVHTree *bvhtree_from_mesh_verts_create_tree(float epsilon,
int axis,
const float (*positions)[3],
const int verts_num,
const BitSpan verts_mask,
const BitVector<> &verts_mask,
int verts_num_active)
{
if (!verts_mask.is_empty()) {
@ -738,7 +737,7 @@ static BVHTree *bvhtree_from_mesh_verts_create_tree(float epsilon,
BVHTree *bvhtree_from_editmesh_verts_ex(BVHTreeFromEditMesh *data,
BMEditMesh *em,
const BitSpan verts_mask,
const BitVector<> &verts_mask,
int verts_num_active,
float epsilon,
int tree_type,
@ -765,7 +764,7 @@ BVHTree *bvhtree_from_editmesh_verts(
BVHTree *bvhtree_from_mesh_verts_ex(BVHTreeFromMesh *data,
const float (*vert_positions)[3],
const int verts_num,
const BitSpan verts_mask,
const BitVector<> &verts_mask,
int verts_num_active,
float epsilon,
int tree_type,
@ -795,7 +794,7 @@ static BVHTree *bvhtree_from_editmesh_edges_create_tree(float epsilon,
int tree_type,
int axis,
BMEditMesh *em,
const BitSpan edges_mask,
const BitVector<> &edges_mask,
int edges_num_active)
{
BM_mesh_elem_table_ensure(em->bm, BM_EDGE);
@ -834,7 +833,7 @@ static BVHTree *bvhtree_from_editmesh_edges_create_tree(float epsilon,
static BVHTree *bvhtree_from_mesh_edges_create_tree(const float (*positions)[3],
const MEdge *edge,
const int edge_num,
const BitSpan edges_mask,
const BitVector<> &edges_mask,
int edges_num_active,
float epsilon,
int tree_type,
@ -872,7 +871,7 @@ static BVHTree *bvhtree_from_mesh_edges_create_tree(const float (*positions)[3],
BVHTree *bvhtree_from_editmesh_edges_ex(BVHTreeFromEditMesh *data,
BMEditMesh *em,
const BitSpan edges_mask,
const BitVector<> &edges_mask,
int edges_num_active,
float epsilon,
int tree_type,
@ -900,7 +899,7 @@ BVHTree *bvhtree_from_mesh_edges_ex(BVHTreeFromMesh *data,
const float (*vert_positions)[3],
const MEdge *edge,
const int edges_num,
const BitSpan edges_mask,
const BitVector<> &edges_mask,
int edges_num_active,
float epsilon,
int tree_type,
@ -932,7 +931,7 @@ static BVHTree *bvhtree_from_mesh_faces_create_tree(float epsilon,
const float (*positions)[3],
const MFace *face,
const int faces_num,
const BitSpan faces_mask,
const BitVector<> &faces_mask,
int faces_num_active)
{
if (faces_num == 0) {
@ -985,7 +984,7 @@ static BVHTree *bvhtree_from_editmesh_looptri_create_tree(float epsilon,
int tree_type,
int axis,
BMEditMesh *em,
const BitSpan looptri_mask,
const BitVector<> &looptri_mask,
int looptri_num_active)
{
const int looptri_num = em->tottri;
@ -1039,7 +1038,7 @@ static BVHTree *bvhtree_from_mesh_looptri_create_tree(float epsilon,
const MLoop *mloop,
const MLoopTri *looptri,
const int looptri_num,
const BitSpan looptri_mask,
const BitVector<> &looptri_mask,
int looptri_num_active)
{
if (!looptri_mask.is_empty()) {
@ -1080,7 +1079,7 @@ static BVHTree *bvhtree_from_mesh_looptri_create_tree(float epsilon,
BVHTree *bvhtree_from_editmesh_looptri_ex(BVHTreeFromEditMesh *data,
BMEditMesh *em,
const BitSpan looptri_mask,
const BitVector<> &looptri_mask,
int looptri_num_active,
float epsilon,
int tree_type,
@ -1110,7 +1109,7 @@ BVHTree *bvhtree_from_mesh_looptri_ex(BVHTreeFromMesh *data,
const struct MLoop *mloop,
const struct MLoopTri *looptri,
const int looptri_num,
const BitSpan looptri_mask,
const BitVector<> &looptri_mask,
int looptri_num_active,
float epsilon,
int tree_type,

@ -2991,19 +2991,6 @@ int CustomData_number_of_layers(const CustomData *data, const int type)
return number;
}
int CustomData_number_of_anonymous_layers(const CustomData *data, const int type)
{
int number = 0;
for (int i = 0; i < data->totlayer; i++) {
if (data->layers[i].type == type && data->layers[i].anonymous_id != nullptr) {
number++;
}
}
return number;
}
int CustomData_number_of_layers_typemask(const CustomData *data, const eCustomDataMask mask)
{
int number = 0;

@ -375,6 +375,7 @@ static ComponentAttributeProviders create_attribute_providers_for_curve()
CD_PROP_FLOAT3,
CD_PROP_FLOAT3,
BuiltinAttributeProvider::NonCreatable,
BuiltinAttributeProvider::Writable,
BuiltinAttributeProvider::NonDeletable,
point_access,
make_array_read_attribute<float3>,
@ -386,6 +387,7 @@ static ComponentAttributeProviders create_attribute_providers_for_curve()
CD_PROP_FLOAT,
CD_PROP_FLOAT,
BuiltinAttributeProvider::Creatable,
BuiltinAttributeProvider::Writable,
BuiltinAttributeProvider::Deletable,
point_access,
make_array_read_attribute<float>,
@ -397,6 +399,7 @@ static ComponentAttributeProviders create_attribute_providers_for_curve()
CD_PROP_INT32,
CD_PROP_INT32,
BuiltinAttributeProvider::Creatable,
BuiltinAttributeProvider::Writable,
BuiltinAttributeProvider::Deletable,
point_access,
make_array_read_attribute<int>,
@ -408,6 +411,7 @@ static ComponentAttributeProviders create_attribute_providers_for_curve()
CD_PROP_FLOAT,
CD_PROP_FLOAT,
BuiltinAttributeProvider::Creatable,
BuiltinAttributeProvider::Writable,
BuiltinAttributeProvider::Deletable,
point_access,
make_array_read_attribute<float>,
@ -419,6 +423,7 @@ static ComponentAttributeProviders create_attribute_providers_for_curve()
CD_PROP_FLOAT3,
CD_PROP_FLOAT3,
BuiltinAttributeProvider::Creatable,
BuiltinAttributeProvider::Writable,
BuiltinAttributeProvider::Deletable,
point_access,
make_array_read_attribute<float3>,
@ -430,6 +435,7 @@ static ComponentAttributeProviders create_attribute_providers_for_curve()
CD_PROP_FLOAT3,
CD_PROP_FLOAT3,
BuiltinAttributeProvider::Creatable,
BuiltinAttributeProvider::Writable,
BuiltinAttributeProvider::Deletable,
point_access,
make_array_read_attribute<float3>,
@ -447,6 +453,7 @@ static ComponentAttributeProviders create_attribute_providers_for_curve()
CD_PROP_INT8,
CD_PROP_INT8,
BuiltinAttributeProvider::Creatable,
BuiltinAttributeProvider::Writable,
BuiltinAttributeProvider::Deletable,
point_access,
make_array_read_attribute<int8_t>,
@ -459,6 +466,7 @@ static ComponentAttributeProviders create_attribute_providers_for_curve()
CD_PROP_INT8,
CD_PROP_INT8,
BuiltinAttributeProvider::Creatable,
BuiltinAttributeProvider::Writable,
BuiltinAttributeProvider::Deletable,
point_access,
make_array_read_attribute<int8_t>,
@ -471,6 +479,7 @@ static ComponentAttributeProviders create_attribute_providers_for_curve()
CD_PROP_FLOAT,
CD_PROP_FLOAT,
BuiltinAttributeProvider::Creatable,
BuiltinAttributeProvider::Writable,
BuiltinAttributeProvider::Deletable,
point_access,
make_array_read_attribute<float>,
@ -486,6 +495,7 @@ static ComponentAttributeProviders create_attribute_providers_for_curve()
CD_PROP_INT8,
CD_PROP_INT8,
BuiltinAttributeProvider::Creatable,
BuiltinAttributeProvider::Writable,
BuiltinAttributeProvider::Deletable,
curve_access,
make_array_read_attribute<int8_t>,
@ -504,6 +514,7 @@ static ComponentAttributeProviders create_attribute_providers_for_curve()
CD_PROP_INT8,
CD_PROP_INT8,
BuiltinAttributeProvider::Creatable,
BuiltinAttributeProvider::Writable,
BuiltinAttributeProvider::Deletable,
curve_access,
make_array_read_attribute<int8_t>,
@ -522,6 +533,7 @@ static ComponentAttributeProviders create_attribute_providers_for_curve()
CD_PROP_INT8,
CD_PROP_INT8,
BuiltinAttributeProvider::Creatable,
BuiltinAttributeProvider::Writable,
BuiltinAttributeProvider::Deletable,
curve_access,
make_array_read_attribute<int8_t>,
@ -540,6 +552,7 @@ static ComponentAttributeProviders create_attribute_providers_for_curve()
CD_PROP_INT8,
CD_PROP_INT8,
BuiltinAttributeProvider::Creatable,
BuiltinAttributeProvider::Writable,
BuiltinAttributeProvider::Deletable,
curve_access,
make_array_read_attribute<int8_t>,
@ -556,6 +569,7 @@ static ComponentAttributeProviders create_attribute_providers_for_curve()
CD_PROP_INT32,
CD_PROP_INT32,
BuiltinAttributeProvider::Creatable,
BuiltinAttributeProvider::Writable,
BuiltinAttributeProvider::Deletable,
curve_access,
make_array_read_attribute<int>,
@ -568,6 +582,7 @@ static ComponentAttributeProviders create_attribute_providers_for_curve()
CD_PROP_BOOL,
CD_PROP_BOOL,
BuiltinAttributeProvider::Creatable,
BuiltinAttributeProvider::Writable,
BuiltinAttributeProvider::Deletable,
curve_access,
make_array_read_attribute<bool>,

@ -129,7 +129,7 @@ class InstancePositionAttributeProvider final : public BuiltinAttributeProvider
public:
InstancePositionAttributeProvider()
: BuiltinAttributeProvider(
"position", ATTR_DOMAIN_INSTANCE, CD_PROP_FLOAT3, NonCreatable, NonDeletable)
"position", ATTR_DOMAIN_INSTANCE, CD_PROP_FLOAT3, NonCreatable, Writable, NonDeletable)
{
}
@ -200,6 +200,7 @@ static ComponentAttributeProviders create_attribute_providers_for_instances()
CD_PROP_INT32,
CD_PROP_INT32,
BuiltinAttributeProvider::Creatable,
BuiltinAttributeProvider::Writable,
BuiltinAttributeProvider::Deletable,
instance_custom_data_access,
make_array_read_attribute<int>,

@ -1137,6 +1137,48 @@ class VertexGroupsAttributeProvider final : public DynamicAttributesProvider {
}
};
/**
* This provider makes face normals available as a read-only float3 attribute.
*/
class NormalAttributeProvider final : public BuiltinAttributeProvider {
public:
NormalAttributeProvider()
: BuiltinAttributeProvider(
"normal", ATTR_DOMAIN_FACE, CD_PROP_FLOAT3, NonCreatable, Readonly, NonDeletable)
{
}
GVArray try_get_for_read(const void *owner) const final
{
const Mesh *mesh = static_cast<const Mesh *>(owner);
if (mesh == nullptr || mesh->totpoly == 0) {
return {};
}
return VArray<float3>::ForSpan({(float3 *)BKE_mesh_poly_normals_ensure(mesh), mesh->totpoly});
}
GAttributeWriter try_get_for_write(void * /*owner*/) const final
{
return {};
}
bool try_delete(void * /*owner*/) const final
{
return false;
}
bool try_create(void * /*owner*/, const AttributeInit & /*initializer*/) const final
{
return false;
}
bool exists(const void *owner) const final
{
const Mesh *mesh = static_cast<const Mesh *>(owner);
return mesh->totpoly != 0;
}
};
/**
* In this function all the attribute providers for a mesh component are created. Most data in this
* function is statically allocated, because it does not change over time.
@ -1180,17 +1222,21 @@ static ComponentAttributeProviders create_attribute_providers_for_mesh()
CD_PROP_FLOAT3,
CD_PROP_FLOAT3,
BuiltinAttributeProvider::NonCreatable,
BuiltinAttributeProvider::Writable,
BuiltinAttributeProvider::NonDeletable,
point_access,
make_array_read_attribute<float3>,
make_array_write_attribute<float3>,
tag_component_positions_changed);
static NormalAttributeProvider normal;
static BuiltinCustomDataLayerProvider id("id",
ATTR_DOMAIN_POINT,
CD_PROP_INT32,
CD_PROP_INT32,
BuiltinAttributeProvider::Creatable,
BuiltinAttributeProvider::Writable,
BuiltinAttributeProvider::Deletable,
point_access,
make_array_read_attribute<int>,
@ -1209,6 +1255,7 @@ static ComponentAttributeProviders create_attribute_providers_for_mesh()
CD_PROP_INT32,
CD_PROP_INT32,
BuiltinAttributeProvider::Creatable,
BuiltinAttributeProvider::Writable,
BuiltinAttributeProvider::Deletable,
face_access,
make_array_read_attribute<int>,
@ -1222,6 +1269,7 @@ static ComponentAttributeProviders create_attribute_providers_for_mesh()
CD_PROP_BOOL,
CD_MPOLY,
BuiltinAttributeProvider::NonCreatable,
BuiltinAttributeProvider::Writable,
BuiltinAttributeProvider::NonDeletable,
face_access,
make_derived_read_attribute<MPoly, bool, get_shade_smooth>,
@ -1233,6 +1281,7 @@ static ComponentAttributeProviders create_attribute_providers_for_mesh()
CD_PROP_BOOL,
CD_PROP_BOOL,
BuiltinAttributeProvider::Creatable,
BuiltinAttributeProvider::Writable,
BuiltinAttributeProvider::Deletable,
edge_access,
make_array_read_attribute<bool>,
@ -1245,6 +1294,7 @@ static ComponentAttributeProviders create_attribute_providers_for_mesh()
CD_PROP_FLOAT,
CD_CREASE,
BuiltinAttributeProvider::Creatable,
BuiltinAttributeProvider::Writable,
BuiltinAttributeProvider::Deletable,
edge_access,
make_array_read_attribute<float>,
@ -1258,7 +1308,7 @@ static ComponentAttributeProviders create_attribute_providers_for_mesh()
static CustomDataAttributeProvider face_custom_data(ATTR_DOMAIN_FACE, face_access);
return ComponentAttributeProviders(
{&position, &id, &material_index, &shade_smooth, &sharp_edge, &crease},
{&position, &id, &material_index, &shade_smooth, &sharp_edge, &normal, &crease},
{&corner_custom_data,
&vertex_groups,
&point_custom_data,
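
The re-added NormalAttributeProvider above shows the shape of a computed, read-only built-in attribute: reads return data derived on demand from the mesh, `exists()` checks whether there is anything to derive it from, and write/create/delete requests are refused (which is what the new Readonly value of WritableEnum expresses). A reduced standalone sketch of that shape, with hypothetical `Mesh`/`float3` stand-ins rather than Blender's attribute API:

```cpp
#include <array>
#include <optional>
#include <vector>

using float3 = std::array<float, 3>;

struct Mesh {
  int totpoly = 0;
  std::vector<float3> poly_normals; /* Assume kept up to date elsewhere. */
};

/* Read-only, derived attribute: values come from the mesh cache, never from storage. */
class NormalProvider {
 public:
  std::optional<std::vector<float3>> try_get_for_read(const Mesh &mesh) const
  {
    if (mesh.totpoly == 0) {
      return std::nullopt;
    }
    return mesh.poly_normals;
  }
  bool try_get_for_write(Mesh & /*mesh*/) const { return false; } /* Refused. */
  bool try_delete(Mesh & /*mesh*/) const { return false; }        /* Refused. */
  bool exists(const Mesh &mesh) const { return mesh.totpoly != 0; }
};

int main()
{
  Mesh mesh;
  mesh.totpoly = 1;
  mesh.poly_normals = {{0.0f, 0.0f, 1.0f}};
  NormalProvider provider;
  return (provider.exists(mesh) && provider.try_get_for_read(mesh) &&
          !provider.try_get_for_write(mesh)) ? 0 : 1;
}
```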

@ -142,6 +142,7 @@ static ComponentAttributeProviders create_attribute_providers_for_point_cloud()
CD_PROP_FLOAT3,
CD_PROP_FLOAT3,
BuiltinAttributeProvider::NonCreatable,
BuiltinAttributeProvider::Writable,
BuiltinAttributeProvider::NonDeletable,
point_access,
make_array_read_attribute<float3>,
@ -152,6 +153,7 @@ static ComponentAttributeProviders create_attribute_providers_for_point_cloud()
CD_PROP_FLOAT,
CD_PROP_FLOAT,
BuiltinAttributeProvider::Creatable,
BuiltinAttributeProvider::Writable,
BuiltinAttributeProvider::Deletable,
point_access,
make_array_read_attribute<float>,
@ -162,6 +164,7 @@ static ComponentAttributeProviders create_attribute_providers_for_point_cloud()
CD_PROP_INT32,
CD_PROP_INT32,
BuiltinAttributeProvider::Creatable,
BuiltinAttributeProvider::Writable,
BuiltinAttributeProvider::Deletable,
point_access,
make_array_read_attribute<int>,

@ -17,6 +17,7 @@
#include "DNA_meshdata_types.h"
#include "DNA_object_types.h"
#include "BLI_bit_vector.hh"
#include "BLI_bounds.hh"
#include "BLI_edgehash.h"
#include "BLI_endian_switch.h"
@ -65,6 +66,7 @@
#include "BLO_read_write.h"
using blender::BitVector;
using blender::float3;
using blender::MutableSpan;
using blender::Span;

@ -316,7 +316,7 @@ void BKE_mesh_foreach_mapped_subdiv_face_center(
BKE_mesh_vertex_normals_ensure(mesh) :
nullptr;
const int *index = static_cast<const int *>(CustomData_get_layer(&mesh->pdata, CD_ORIGINDEX));
const blender::BitSpan facedot_tags = mesh->runtime->subsurf_face_dot_tags;
const blender::BitVector<> &facedot_tags = mesh->runtime->subsurf_face_dot_tags;
if (index) {
for (int i = 0; i < mesh->totpoly; i++, mp++) {

@ -42,7 +42,6 @@
using blender::BitVector;
using blender::float3;
using blender::int2;
using blender::MutableBitSpan;
using blender::MutableSpan;
using blender::short2;
using blender::Span;
@ -1239,7 +1238,7 @@ static bool loop_split_generator_check_cyclic_smooth_fan(const Span<MLoop> mloop
const Span<int2> edge_to_loops,
const Span<int> loop_to_poly,
const int *e2l_prev,
MutableBitSpan skip_loops,
BitVector<> &skip_loops,
const int ml_curr_index,
const int ml_prev_index,
const int mp_curr_index)

@ -91,7 +91,7 @@ void BKE_nlastrip_free(NlaStrip *strip, const bool do_id_user)
MEM_freeN(strip);
}
void BKE_nlatrack_free(NlaTrack *nlt, const bool do_id_user)
void BKE_nlatrack_free(ListBase *tracks, NlaTrack *nlt, bool do_id_user)
{
NlaStrip *strip, *stripn;
@ -107,7 +107,12 @@ void BKE_nlatrack_free(NlaTrack *nlt, const bool do_id_user)
}
/* free NLA track itself now */
MEM_freeN(nlt);
if (tracks) {
BLI_freelinkN(tracks, nlt);
}
else {
MEM_freeN(nlt);
}
}
void BKE_nla_tracks_free(ListBase *tracks, bool do_id_user)
@ -122,7 +127,7 @@ void BKE_nla_tracks_free(ListBase *tracks, bool do_id_user)
/* free tracks one by one */
for (nlt = tracks->first; nlt; nlt = nltn) {
nltn = nlt->next;
BKE_nlatrack_remove_and_free(tracks, nlt, do_id_user);
BKE_nlatrack_free(tracks, nlt, do_id_user);
}
/* clear the list's pointers to be safe */
@ -509,20 +514,6 @@ void BKE_nla_strip_foreach_id(NlaStrip *strip, LibraryForeachIDData *data)
}
}
/* Removing ------------------------------------------ */
void BKE_nlatrack_remove(ListBase *tracks, struct NlaTrack *nlt)
{
BLI_assert(tracks);
BLI_remlink(tracks, nlt);
}
void BKE_nlatrack_remove_and_free(ListBase *tracks, struct NlaTrack *nlt, bool do_id_user)
{
BKE_nlatrack_remove(tracks, nlt);
BKE_nlatrack_free(nlt, do_id_user);
}
/* *************************************************** */
/* NLA Evaluation <-> Editing Stuff */

@ -91,29 +91,4 @@ TEST(nla_track, BKE_nlatrack_remove_strip)
EXPECT_EQ(-1, BLI_findindex(&track.strips, &strip2));
}
TEST(nla_track, BKE_nlatrack_remove_and_free)
{
AnimData adt{};
NlaTrack *track1;
NlaTrack *track2;
// Add NLA tracks to the Animation Data.
track1 = BKE_nlatrack_add(&adt, NULL, false);
track2 = BKE_nlatrack_add(&adt, track1, false);
// ensure we have 2 tracks in the track.
EXPECT_EQ(2, BLI_listbase_count(&adt.nla_tracks));
BKE_nlatrack_remove_and_free(&adt.nla_tracks, track2, false);
EXPECT_EQ(1, BLI_listbase_count(&adt.nla_tracks));
// ensure the correct track was removed.
EXPECT_EQ(-1, BLI_findindex(&adt.nla_tracks, track2));
// free the rest of the tracks, and ensure they are removed.
BKE_nlatrack_remove_and_free(&adt.nla_tracks, track1, false);
EXPECT_EQ(0, BLI_listbase_count(&adt.nla_tracks));
EXPECT_EQ(-1, BLI_findindex(&adt.nla_tracks, track1));
}
} // namespace blender::bke::tests

@ -2266,8 +2266,6 @@ PBVH *BKE_sculpt_object_pbvh_ensure(Depsgraph *depsgraph, Object *ob)
return pbvh;
}
ob->sculpt->islands_valid = false;
if (ob->sculpt->bm != nullptr) {
/* Sculpting on a BMesh (dynamic-topology) gets a special PBVH. */
pbvh = build_pbvh_for_dynamic_topology(ob);

@ -956,9 +956,7 @@ ScrArea *BKE_screen_area_map_find_area_xy(const ScrAreaMap *areamap,
const int xy[2])
{
LISTBASE_FOREACH (ScrArea *, area, &areamap->areabase) {
/* Test area's outer screen verts, not inner totrct. */
if (xy[0] >= area->v1->vec.x && xy[0] <= area->v4->vec.x && xy[1] >= area->v1->vec.y &&
xy[1] <= area->v2->vec.y) {
if (BLI_rcti_isect_pt_v(&area->totrct, xy)) {
if (ELEM(spacetype, SPACE_TYPE_ANY, area->spacetype)) {
return area;
}

@ -1,234 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#pragma once
/** \file
* \ingroup bli
*
* This file provides the basis for processing "indexed bits" (i.e. every bit has an index).
* The main purpose of this file is to define how bits are indexed within a memory buffer.
* For example, one has to define whether the first bit is the least or most significant bit and
* how endianness affect the bit order.
*
* The order is defined as follows:
* - Every indexed bit is part of an #BitInt. These ints are ordered by their address as usual.
* - Within each #BitInt, the bits are ordered from least to most significant.
*/
#include "BLI_index_range.hh"
#include "BLI_utildefines.h"
#include <ostream>
namespace blender::bits {
/** Using a large integer type is better because then it's easier to process many bits at once. */
using BitInt = uint64_t;
/** Number of bits that fit into #BitInt. */
static constexpr int64_t BitsPerInt = int64_t(sizeof(BitInt) * 8);
/** Shift amount to get from a bit index to an int index. Equivalent to `log(BitsPerInt, 2)`. */
static constexpr int64_t BitToIntIndexShift = 3 + (sizeof(BitInt) >= 2) + (sizeof(BitInt) >= 4) +
(sizeof(BitInt) >= 8);
/** Bit mask containing a 1 for the last few bits that index a bit inside of an #BitInt. */
static constexpr BitInt BitIndexMask = (BitInt(1) << BitToIntIndexShift) - 1;
inline BitInt mask_first_n_bits(const int64_t n)
{
BLI_assert(n >= 0);
BLI_assert(n <= BitsPerInt);
if (n == BitsPerInt) {
return BitInt(-1);
}
return (BitInt(1) << n) - 1;
}
inline BitInt mask_last_n_bits(const int64_t n)
{
return ~mask_first_n_bits(BitsPerInt - n);
}
inline BitInt mask_range_bits(const int64_t start, const int64_t size)
{
BLI_assert(start >= 0);
BLI_assert(size >= 0);
const int64_t end = start + size;
BLI_assert(end <= BitsPerInt);
if (end == BitsPerInt) {
return mask_last_n_bits(size);
}
return ((BitInt(1) << end) - 1) & ~((BitInt(1) << start) - 1);
}
inline BitInt mask_single_bit(const int64_t bit_index)
{
BLI_assert(bit_index >= 0);
BLI_assert(bit_index < BitsPerInt);
return BitInt(1) << bit_index;
}
inline BitInt *int_containing_bit(BitInt *data, const int64_t bit_index)
{
return data + (bit_index >> BitToIntIndexShift);
}
inline const BitInt *int_containing_bit(const BitInt *data, const int64_t bit_index)
{
return data + (bit_index >> BitToIntIndexShift);
}
/**
* This is a read-only pointer to a specific bit. The value of the bit can be retrieved, but
* not changed.
*/
class BitRef {
private:
/** Points to the exact integer that the bit is in. */
const BitInt *int_;
/** All zeros except for a single one at the bit that is referenced. */
BitInt mask_;
friend class MutableBitRef;
public:
BitRef() = default;
/**
* Reference a specific bit in an array. Note that #data does *not* have to point to the
* exact integer the bit is in.
*/
BitRef(const BitInt *data, const int64_t bit_index)
{
int_ = int_containing_bit(data, bit_index);
mask_ = mask_single_bit(bit_index & BitIndexMask);
}
/**
* Return true when the bit is currently 1 and false otherwise.
*/
bool test() const
{
const BitInt value = *int_;
const BitInt masked_value = value & mask_;
return masked_value != 0;
}
operator bool() const
{
return this->test();
}
};
/**
* Similar to #BitRef, but also allows changing the referenced bit.
*/
class MutableBitRef {
private:
/** Points to the integer that the bit is in. */
BitInt *int_;
/** All zeros except for a single one at the bit that is referenced. */
BitInt mask_;
public:
MutableBitRef() = default;
/**
* Reference a specific bit in an array. Note that #data does *not* have to point to the
* exact int the bit is in.
*/
MutableBitRef(BitInt *data, const int64_t bit_index)
{
int_ = int_containing_bit(data, bit_index);
mask_ = mask_single_bit(bit_index & BitIndexMask);
}
/**
* Support implicitly casting to a read-only #BitRef.
*/
operator BitRef() const
{
BitRef bit_ref;
bit_ref.int_ = int_;
bit_ref.mask_ = mask_;
return bit_ref;
}
/**
* Return true when the bit is currently 1 and false otherwise.
*/
bool test() const
{
const BitInt value = *int_;
const BitInt masked_value = value & mask_;
return masked_value != 0;
}
operator bool() const
{
return this->test();
}
/**
* Change the bit to a 1.
*/
void set()
{
*int_ |= mask_;
}
/**
* Change the bit to a 0.
*/
void reset()
{
*int_ &= ~mask_;
}
/**
* Change the bit to a 1 if #value is true and 0 otherwise. If the value is highly unpredictable
* by the CPU branch predictor, it can be faster to use #set_branchless instead.
*/
void set(const bool value)
{
if (value) {
this->set();
}
else {
this->reset();
}
}
/**
* Does the same as #set, but does not use a branch. This is faster when the input value is
* unpredictable for the CPU branch predictor (best case for this function is a uniform random
* distribution with 50% probability for true and false). If the value is predictable, this is
* likely slower than #set.
*/
void set_branchless(const bool value)
{
const BitInt value_int = BitInt(value);
BLI_assert(ELEM(value_int, 0, 1));
const BitInt old = *int_;
*int_ =
/* Unset bit. */
(~mask_ & old)
/* Optionally set it again. The -1 turns a 1 into `0x00...` and a 0 into `0xff...`. */
| (mask_ & ~(value_int - 1));
}
};
inline std::ostream &operator<<(std::ostream &stream, const BitRef &bit)
{
return stream << (bit ? "1" : "0");
}
inline std::ostream &operator<<(std::ostream &stream, const MutableBitRef &bit)
{
return stream << BitRef(bit);
}
} // namespace blender::bits
namespace blender {
using bits::BitRef;
using bits::MutableBitRef;
} // namespace blender
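
The removed header above pins down the bit-indexing convention: bits live in 64-bit BitInt words, a bit index splits into a word index (`index >> 6`) and an in-word position (`index & 63`), and the mask helpers are plain shift arithmetic. A standalone sketch spelling out a few of those values:

```cpp
#include <cassert>
#include <cstdint>

using BitInt = uint64_t;
constexpr int64_t BitsPerInt = 64;

/* Same expressions as the removed mask helpers, written out for a 64-bit word. */
static BitInt mask_first_n_bits(const int64_t n)
{
  return n == BitsPerInt ? BitInt(-1) : (BitInt(1) << n) - 1;
}
static BitInt mask_single_bit(const int64_t bit_index)
{
  return BitInt(1) << bit_index;
}

int main()
{
  assert(mask_first_n_bits(3) == 0b111);  /* Lowest three bits. */
  assert(mask_single_bit(5) == 0b100000); /* Only bit 5 set. */

  /* Bit 70 lives in word 1 (70 >> 6) at in-word position 6 (70 & 63). */
  assert((70 >> 6) == 1 && (70 & 63) == 6);

  /* Testing a bit is a masked read, exactly as BitRef::test() does. */
  const BitInt word = mask_single_bit(6);
  assert((word & mask_single_bit(6)) != 0);
  return 0;
}
```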

@ -1,290 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#pragma once
#include "BLI_bit_ref.hh"
#include "BLI_index_range.hh"
#include "BLI_memory_utils.hh"
namespace blender::bits {
/** Base class for a const and non-const bit-iterator. */
class BitIteratorBase {
protected:
const BitInt *data_;
int64_t bit_index_;
public:
BitIteratorBase(const BitInt *data, const int64_t bit_index) : data_(data), bit_index_(bit_index)
{
}
BitIteratorBase &operator++()
{
bit_index_++;
return *this;
}
friend bool operator!=(const BitIteratorBase &a, const BitIteratorBase &b)
{
BLI_assert(a.data_ == b.data_);
return a.bit_index_ != b.bit_index_;
}
};
/** Allows iterating over the bits in a memory buffer. */
class BitIterator : public BitIteratorBase {
public:
BitIterator(const BitInt *data, const int64_t bit_index) : BitIteratorBase(data, bit_index)
{
}
BitRef operator*() const
{
return BitRef(data_, bit_index_);
}
};
/** Allows iterating over the bits in a memory buffer. */
class MutableBitIterator : public BitIteratorBase {
public:
MutableBitIterator(BitInt *data, const int64_t bit_index) : BitIteratorBase(data, bit_index)
{
}
MutableBitRef operator*() const
{
return MutableBitRef(const_cast<BitInt *>(data_), bit_index_);
}
};
/**
* Similar to #Span, but references a range of bits instead of normal C++ types (which must be at
* least one byte large). Use #MutableBitSpan if the values are supposed to be modified.
*
* The beginning and end of a #BitSpan does *not* have to be at byte/int boundaries. It can start
* and end at any bit.
*/
class BitSpan {
private:
/** Base pointer to the integers containing the bits. The actual bit span might start at a much
* higher address when `bit_range_.start()` is large. */
const BitInt *data_ = nullptr;
/** The range of referenced bits. */
IndexRange bit_range_ = {0, 0};
public:
/** Construct an empty span. */
BitSpan() = default;
BitSpan(const BitInt *data, const int64_t size_in_bits) : data_(data), bit_range_(size_in_bits)
{
}
BitSpan(const BitInt *data, const IndexRange bit_range) : data_(data), bit_range_(bit_range)
{
}
/** Number of bits referenced by the span. */
int64_t size() const
{
return bit_range_.size();
}
bool is_empty() const
{
return bit_range_.is_empty();
}
IndexRange index_range() const
{
return IndexRange(bit_range_.size());
}
BitRef operator[](const int64_t index) const
{
BLI_assert(index >= 0);
BLI_assert(index < bit_range_.size());
return {data_, bit_range_.start() + index};
}
BitSpan slice(const IndexRange range) const
{
return {data_, bit_range_.slice(range)};
}
const BitInt *data() const
{
return data_;
}
const IndexRange &bit_range() const
{
return bit_range_;
}
BitIterator begin() const
{
return {data_, bit_range_.start()};
}
BitIterator end() const
{
return {data_, bit_range_.one_after_last()};
}
};
/** Same as #BitSpan, but also allows modifying the referenced bits. */
class MutableBitSpan {
private:
BitInt *data_ = nullptr;
IndexRange bit_range_ = {0, 0};
public:
MutableBitSpan() = default;
MutableBitSpan(BitInt *data, const int64_t size) : data_(data), bit_range_(size)
{
}
MutableBitSpan(BitInt *data, const IndexRange bit_range) : data_(data), bit_range_(bit_range)
{
}
int64_t size() const
{
return bit_range_.size();
}
bool is_empty() const
{
return bit_range_.is_empty();
}
IndexRange index_range() const
{
return IndexRange(bit_range_.size());
}
MutableBitRef operator[](const int64_t index) const
{
BLI_assert(index >= 0);
BLI_assert(index < bit_range_.size());
return {data_, bit_range_.start() + index};
}
MutableBitSpan slice(const IndexRange range) const
{
return {data_, bit_range_.slice(range)};
}
BitInt *data() const
{
return data_;
}
const IndexRange &bit_range() const
{
return bit_range_;
}
MutableBitIterator begin() const
{
return {data_, bit_range_.start()};
}
MutableBitIterator end() const
{
return {data_, bit_range_.one_after_last()};
}
operator BitSpan() const
{
return {data_, bit_range_};
}
/** Sets all referenced bits to 1. */
void set_all()
{
const AlignedIndexRanges ranges = split_index_range_by_alignment(bit_range_, BitsPerInt);
{
BitInt &first_int = *int_containing_bit(data_, bit_range_.start());
const BitInt first_int_mask = mask_range_bits(ranges.prefix.start() & BitIndexMask,
ranges.prefix.size());
first_int |= first_int_mask;
}
{
BitInt *start = int_containing_bit(data_, ranges.aligned.start());
const int64_t ints_to_fill = ranges.aligned.size() / BitsPerInt;
constexpr BitInt fill_value = BitInt(-1);
initialized_fill_n(start, ints_to_fill, fill_value);
}
{
BitInt &last_int = *int_containing_bit(data_, bit_range_.one_after_last() - 1);
const BitInt last_int_mask = mask_first_n_bits(ranges.suffix.size());
last_int |= last_int_mask;
}
}
/** Sets all referenced bits to 0. */
void reset_all()
{
const AlignedIndexRanges ranges = split_index_range_by_alignment(bit_range_, BitsPerInt);
{
BitInt &first_int = *int_containing_bit(data_, bit_range_.start());
const BitInt first_int_mask = mask_range_bits(ranges.prefix.start() & BitIndexMask,
ranges.prefix.size());
first_int &= ~first_int_mask;
}
{
BitInt *start = int_containing_bit(data_, ranges.aligned.start());
const int64_t ints_to_fill = ranges.aligned.size() / BitsPerInt;
constexpr BitInt fill_value = 0;
initialized_fill_n(start, ints_to_fill, fill_value);
}
{
BitInt &last_int = *int_containing_bit(data_, bit_range_.one_after_last() - 1);
const BitInt last_int_mask = mask_first_n_bits(ranges.suffix.size());
last_int &= ~last_int_mask;
}
}
/** Sets all referenced bits to either 0 or 1. */
void set_all(const bool value)
{
if (value) {
this->set_all();
}
else {
this->reset_all();
}
}
/** Same as #set_all to mirror #MutableSpan. */
void fill(const bool value)
{
this->set_all(value);
}
};
inline std::ostream &operator<<(std::ostream &stream, const BitSpan &span)
{
stream << "(Size: " << span.size() << ", ";
for (const BitRef bit : span) {
stream << bit;
}
stream << ")";
return stream;
}
inline std::ostream &operator<<(std::ostream &stream, const MutableBitSpan &span)
{
return stream << BitSpan(span);
}
} // namespace blender::bits
namespace blender {
using bits::BitSpan;
using bits::MutableBitSpan;
} // namespace blender
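
A usage sketch of the span types removed above, assuming the two headers as listed are still on the include path (they are what this revert deletes, with the functionality folded back into BLI_bit_vector.hh):

```cpp
#include "BLI_bit_span.hh" /* As listed above; removed by this commit. */

#include <cassert>
#include <cstdint>

int main()
{
  using namespace blender;

  /* Two 64-bit words give room for 128 bits; spans may start and end mid-word. */
  uint64_t buffer[2] = {0, 0};

  MutableBitSpan all_bits(buffer, 128);
  MutableBitSpan middle = all_bits.slice(IndexRange(60, 10)); /* Bits 60..69. */
  middle.set_all();

  const BitSpan read_only = all_bits; /* Implicit conversion to the read-only span. */
  assert(!read_only[59] && read_only[60] && read_only[69] && !read_only[70]);
  assert(buffer[0] == (0xFull << 60)); /* Bits 60..63 of the first word. */
  assert(buffer[1] == 0x3F);           /* Bits 0..5 of the second word. */
  return 0;
}
```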

@ -38,11 +38,142 @@
#include <cstring>
#include "BLI_allocator.hh"
#include "BLI_bit_span.hh"
#include "BLI_index_range.hh"
#include "BLI_memory_utils.hh"
#include "BLI_span.hh"
namespace blender::bits {
/**
* Using a large integer type is better because then it's easier to process many bits at once.
*/
using IntType = uint64_t;
static constexpr int64_t BitsPerInt = int64_t(sizeof(IntType) * 8);
static constexpr int64_t BitToIntIndexShift = 3 + (sizeof(IntType) >= 2) + (sizeof(IntType) >= 4) +
(sizeof(IntType) >= 8);
static constexpr IntType BitIndexMask = (IntType(1) << BitToIntIndexShift) - 1;
/**
* This is a read-only pointer to a specific bit. The value of the bit can be retrieved, but
* not changed.
*/
class BitRef {
private:
/** Points to the integer that the bit is in. */
const IntType *ptr_;
/** All zeros except for a single one at the bit that is referenced. */
IntType mask_;
friend class MutableBitRef;
public:
BitRef() = default;
/**
* Reference a specific bit in an array. Note that #ptr does *not* have to point to the
* exact integer the bit is in.
*/
BitRef(const IntType *ptr, const int64_t bit_index)
{
ptr_ = ptr + (bit_index >> BitToIntIndexShift);
mask_ = IntType(1) << (bit_index & BitIndexMask);
}
/**
* Return true when the bit is currently 1 and false otherwise.
*/
bool test() const
{
const IntType value = *ptr_;
const IntType masked_value = value & mask_;
return masked_value != 0;
}
operator bool() const
{
return this->test();
}
};
/**
* Similar to #BitRef, but also allows changing the referenced bit.
*/
class MutableBitRef {
private:
/** Points to the integer that the bit is in. */
IntType *ptr_;
/** All zeros except for a single one at the bit that is referenced. */
IntType mask_;
public:
MutableBitRef() = default;
/**
* Reference a specific bit in an array. Note that #ptr does *not* have to point to the
* exact int the bit is in.
*/
MutableBitRef(IntType *ptr, const int64_t bit_index)
{
ptr_ = ptr + (bit_index >> BitToIntIndexShift);
mask_ = IntType(1) << IntType(bit_index & BitIndexMask);
}
/**
* Support implicitly casting to a read-only #BitRef.
*/
operator BitRef() const
{
BitRef bit_ref;
bit_ref.ptr_ = ptr_;
bit_ref.mask_ = mask_;
return bit_ref;
}
/**
* Return true when the bit is currently 1 and false otherwise.
*/
bool test() const
{
const IntType value = *ptr_;
const IntType masked_value = value & mask_;
return masked_value != 0;
}
operator bool() const
{
return this->test();
}
/**
* Change the bit to a 1.
*/
void set()
{
*ptr_ |= mask_;
}
/**
* Change the bit to a 0.
*/
void reset()
{
*ptr_ &= ~mask_;
}
/**
* Change the bit to a 1 if #value is true and 0 otherwise.
*/
void set(const bool value)
{
if (value) {
this->set();
}
else {
this->reset();
}
}
};
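A brief hedged usage sketch of the two reference types above:
uint64_t word = 0;
MutableBitRef(&word, 5).set();                   /* word == 0b100000 (bit 5 set) */
MutableBitRef(&word, 5).set(false);              /* word == 0 again */
const bool bit_is_set = BitRef(&word, 5).test(); /* false */
(void)bit_is_set;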
template<
/**
* Number of bits that can be stored in the vector without doing an allocation.
@ -62,13 +193,13 @@ class BitVector {
static constexpr int64_t IntsInInlineBuffer = required_ints_for_bits(InlineBufferCapacity);
static constexpr int64_t BitsInInlineBuffer = IntsInInlineBuffer * BitsPerInt;
static constexpr int64_t AllocationAlignment = alignof(BitInt);
static constexpr int64_t AllocationAlignment = alignof(IntType);
/**
* Points to the first integer used by the vector. It might point to the memory in the inline
* buffer.
*/
BitInt *data_;
IntType *data_;
/** Current size of the vector in bits. */
int64_t size_in_bits_;
@ -80,7 +211,7 @@ class BitVector {
BLI_NO_UNIQUE_ADDRESS Allocator allocator_;
/** Contains the bits as long as the vector is small enough. */
BLI_NO_UNIQUE_ADDRESS TypedBuffer<BitInt, IntsInInlineBuffer> inline_buffer_;
BLI_NO_UNIQUE_ADDRESS TypedBuffer<IntType, IntsInInlineBuffer> inline_buffer_;
public:
BitVector(Allocator allocator = {}) noexcept : allocator_(allocator)
@ -88,7 +219,7 @@ class BitVector {
data_ = inline_buffer_;
size_in_bits_ = 0;
capacity_in_bits_ = BitsInInlineBuffer;
uninitialized_fill_n(data_, IntsInInlineBuffer, BitInt(0));
uninitialized_fill_n(data_, IntsInInlineBuffer, IntType(0));
}
BitVector(NoExceptConstructor, Allocator allocator = {}) noexcept : BitVector(allocator)
@ -105,8 +236,8 @@ class BitVector {
}
else {
/* Allocate a new array because the inline buffer is too small. */
data_ = static_cast<BitInt *>(
allocator_.allocate(ints_to_copy * sizeof(BitInt), AllocationAlignment, __func__));
data_ = static_cast<IntType *>(
allocator_.allocate(ints_to_copy * sizeof(IntType), AllocationAlignment, __func__));
capacity_in_bits_ = ints_to_copy * BitsPerInt;
}
size_in_bits_ = other.size_in_bits_;
@ -172,16 +303,6 @@ class BitVector {
return move_assign_container(*this, std::move(other));
}
operator BitSpan() const
{
return {data_, IndexRange(size_in_bits_)};
}
operator MutableBitSpan()
{
return {data_, IndexRange(size_in_bits_)};
}
/**
* Number of bits in the bit vector.
*/
@ -231,24 +352,80 @@ class BitVector {
size_in_bits_++;
}
BitIterator begin() const
class Iterator {
private:
const BitVector *vector_;
int64_t index_;
public:
Iterator(const BitVector &vector, const int64_t index) : vector_(&vector), index_(index)
{
}
Iterator &operator++()
{
index_++;
return *this;
}
friend bool operator!=(const Iterator &a, const Iterator &b)
{
BLI_assert(a.vector_ == b.vector_);
return a.index_ != b.index_;
}
BitRef operator*() const
{
return (*vector_)[index_];
}
};
class MutableIterator {
private:
BitVector *vector_;
int64_t index_;
public:
MutableIterator(BitVector &vector, const int64_t index) : vector_(&vector), index_(index)
{
}
MutableIterator &operator++()
{
index_++;
return *this;
}
friend bool operator!=(const MutableIterator &a, const MutableIterator &b)
{
BLI_assert(a.vector_ == b.vector_);
return a.index_ != b.index_;
}
MutableBitRef operator*() const
{
return (*vector_)[index_];
}
};
Iterator begin() const
{
return {data_, 0};
return {*this, 0};
}
BitIterator end() const
Iterator end() const
{
return {data_, size_in_bits_};
return {*this, size_in_bits_};
}
MutableBitIterator begin()
MutableIterator begin()
{
return {data_, 0};
return {*this, 0};
}
MutableBitIterator end()
MutableIterator end()
{
return {data_, size_in_bits_};
return {*this, size_in_bits_};
}
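A hedged sketch of iterating the vector with these iterators (append() and operator[] are assumed from the rest of this class):
BitVector<> flags;
flags.append(true);
flags.append(false);
for (MutableBitRef bit : flags) {
  bit.set(true); /* MutableIterator yields a MutableBitRef */
}
const BitVector<> &const_flags = flags;
for (const BitRef bit : const_flags) {
  (void)bit.test(); /* Iterator yields a read-only BitRef */
}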
/**
@ -264,8 +441,31 @@ class BitVector {
}
size_in_bits_ = new_size_in_bits;
if (old_size_in_bits < new_size_in_bits) {
MutableBitSpan(data_, IndexRange(old_size_in_bits, new_size_in_bits - old_size_in_bits))
.set_all(value);
this->fill_range(IndexRange(old_size_in_bits, new_size_in_bits - old_size_in_bits), value);
}
}
/**
* Set #value for every element in #range.
*/
void fill_range(const IndexRange range, const bool value)
{
const AlignedIndexRanges aligned_ranges = split_index_range_by_alignment(range, BitsPerInt);
/* Fill first few bits. */
for (const int64_t i : aligned_ranges.prefix) {
(*this)[i].set(value);
}
/* Fill entire ints at once. */
const int64_t start_fill_int_index = aligned_ranges.aligned.start() / BitsPerInt;
const int64_t ints_to_fill = aligned_ranges.aligned.size() / BitsPerInt;
const IntType fill_value = value ? IntType(-1) : IntType(0);
initialized_fill_n(data_ + start_fill_int_index, ints_to_fill, fill_value);
/* Fill bits in the end that don't cover a full int. */
for (const int64_t i : aligned_ranges.suffix) {
(*this)[i].set(value);
}
}
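A hedged usage sketch of the ranged fill (the resize-with-value signature is assumed from the call in resize() above):
BitVector<> bits;
bits.resize(200, false);                   /* newly added bits start cleared */
bits.fill_range(IndexRange(60, 72), true); /* bits [60, 132): per-bit head/tail, whole ints in between */
bits.fill(false);                          /* clears everything again */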
@ -274,7 +474,7 @@ class BitVector {
*/
void fill(const bool value)
{
MutableBitSpan(data_, size_in_bits_).set_all(value);
this->fill_range(IndexRange(0, size_in_bits_), value);
}
/**
@ -317,7 +517,7 @@ class BitVector {
}
BLI_NOINLINE void realloc_to_at_least(const int64_t min_capacity_in_bits,
const BitInt initial_value_for_new_ints = 0)
const IntType initial_value_for_new_ints = 0x00)
{
if (capacity_in_bits_ >= min_capacity_in_bits) {
return;
@ -331,8 +531,8 @@ class BitVector {
const int64_t new_capacity_in_ints = std::max(min_capacity_in_ints, min_new_capacity_in_ints);
const int64_t ints_to_copy = this->used_ints_amount();
BitInt *new_data = static_cast<BitInt *>(
allocator_.allocate(new_capacity_in_ints * sizeof(BitInt), AllocationAlignment, __func__));
IntType *new_data = static_cast<IntType *>(allocator_.allocate(
new_capacity_in_ints * sizeof(IntType), AllocationAlignment, __func__));
uninitialized_copy_n(data_, ints_to_copy, new_data);
/* Always initialize new capacity even if it isn't used yet. That's necessary to avoid warnings
* caused by using uninitialized memory. This happens when e.g. setting or clearing a bit in an
@ -362,5 +562,7 @@ class BitVector {
} // namespace blender::bits
namespace blender {
using bits::BitRef;
using bits::BitVector;
using bits::MutableBitRef;
} // namespace blender

@ -304,13 +304,6 @@ void rotate_eul(float beul[3], char axis, float angle);
/* Order independent. */
/**
* Manipulate `eul` so it's close to `oldrot` while representing the same rotation
* with the aim of having the minimum difference between all axes.
*
* This is typically done so interpolating the values between two euler rotations
* doesn't add undesired rotation (even rotating multiple times around one axis).
*/
void compatible_eul(float eul[3], const float oldrot[3]);
void add_eul_euleul(float r_eul[3], float a[3], float b[3], short order);

@ -457,8 +457,6 @@ if(WITH_GTESTS)
tests/BLI_array_store_test.cc
tests/BLI_array_test.cc
tests/BLI_array_utils_test.cc
tests/BLI_bit_ref_test.cc
tests/BLI_bit_span_test.cc
tests/BLI_bit_vector_test.cc
tests/BLI_bitmap_test.cc
tests/BLI_bounds_test.cc

@ -1490,18 +1490,15 @@ void rotate_eul(float beul[3], const char axis, const float angle)
void compatible_eul(float eul[3], const float oldrot[3])
{
/* When the rotation exceeds 180 degrees, it can be wrapped by 360 degrees
* to produce a closer match.
* NOTE: Values between `pi` & `2 * pi` work, where `pi` has the lowest number of
* discontinuities and values approaching `2 * pi` center the resulting rotation around zero,
* at the expense of the result being less compatible, see !104856. */
const float pi_thresh = (float)M_PI;
/* We could use M_PI as pi_thresh (which is correct), but 5.1 gives better results.
* Checked by baking actions to f-curves - campbell */
const float pi_thresh = (5.1f);
const float pi_x2 = (2.0f * (float)M_PI);
float deul[3];
uint i;
/* Correct differences around 360 degrees first. */
/* correct differences of about 360 degrees first */
for (i = 0; i < 3; i++) {
deul[i] = eul[i] - oldrot[i];
if (deul[i] > pi_thresh) {
@ -1514,17 +1511,29 @@ void compatible_eul(float eul[3], const float oldrot[3])
}
}
uint j = 1, k = 2;
for (i = 0; i < 3; j = k, k = i++) {
/* Check if this axis of rotation is larger than 180 degrees and
* the others are smaller than 90 degrees. */
if (fabsf(deul[i]) > M_PI && fabsf(deul[j]) < M_PI_2 && fabsf(deul[k]) < M_PI_2) {
if (deul[i] > 0.0f) {
eul[i] -= pi_x2;
}
else {
eul[i] += pi_x2;
}
/* Is one of the axis rotations larger than 180 degrees while the other two are small? Note: no 'else if' here, each axis is checked independently. */
if (fabsf(deul[0]) > 3.2f && fabsf(deul[1]) < 1.6f && fabsf(deul[2]) < 1.6f) {
if (deul[0] > 0.0f) {
eul[0] -= pi_x2;
}
else {
eul[0] += pi_x2;
}
}
if (fabsf(deul[1]) > 3.2f && fabsf(deul[2]) < 1.6f && fabsf(deul[0]) < 1.6f) {
if (deul[1] > 0.0f) {
eul[1] -= pi_x2;
}
else {
eul[1] += pi_x2;
}
}
if (fabsf(deul[2]) > 3.2f && fabsf(deul[0]) < 1.6f && fabsf(deul[1]) < 1.6f) {
if (deul[2] > 0.0f) {
eul[2] -= pi_x2;
}
else {
eul[2] += pi_x2;
}
}
}
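A hedged usage sketch of the function above (angles in radians): remapping a freshly decomposed rotation so it stays close to the previously keyed one before interpolation.
float prev_eul[3] = {0.1f, 6.2f, 0.0f};  /* rotation keyed on the previous frame */
float curr_eul[3] = {0.2f, 0.05f, 0.0f}; /* newly decomposed rotation */
compatible_eul(curr_eul, prev_eul);      /* curr_eul[1] becomes ~6.33 instead of 0.05 */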

@ -1,160 +0,0 @@
/* SPDX-License-Identifier: Apache-2.0 */
#include <array>
#include "BLI_bit_ref.hh"
#include "testing/testing.h"
namespace blender::bits::tests {
TEST(bit_ref, MaskFirstNBits)
{
EXPECT_EQ(mask_first_n_bits(0), 0);
EXPECT_EQ(mask_first_n_bits(1),
0b0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0001);
EXPECT_EQ(mask_first_n_bits(5),
0b0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0001'1111);
EXPECT_EQ(mask_first_n_bits(63),
0b0111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111);
EXPECT_EQ(mask_first_n_bits(64),
0b1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111);
}
TEST(bit_ref, MaskLastNBits)
{
EXPECT_EQ(mask_last_n_bits(0),
0b0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000);
EXPECT_EQ(mask_last_n_bits(1),
0b1000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000);
EXPECT_EQ(mask_last_n_bits(5),
0b1111'1000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000);
EXPECT_EQ(mask_last_n_bits(63),
0b1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1110);
EXPECT_EQ(mask_last_n_bits(64),
0b1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111);
}
TEST(bit_ref, MaskSingleBit)
{
EXPECT_EQ(mask_single_bit(0), 1);
EXPECT_EQ(mask_single_bit(1),
0b0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0010);
EXPECT_EQ(mask_single_bit(5),
0b0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0010'0000);
EXPECT_EQ(mask_single_bit(63),
0b1000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000);
}
TEST(bit_ref, IntContainingBit)
{
std::array<uint64_t, 5> array;
uint64_t *data = array.data();
EXPECT_EQ(int_containing_bit(data, 0), data);
EXPECT_EQ(int_containing_bit(data, 1), data);
EXPECT_EQ(int_containing_bit(data, 63), data);
EXPECT_EQ(int_containing_bit(data, 64), data + 1);
EXPECT_EQ(int_containing_bit(data, 65), data + 1);
EXPECT_EQ(int_containing_bit(data, 100), data + 1);
EXPECT_EQ(int_containing_bit(data, 127), data + 1);
EXPECT_EQ(int_containing_bit(data, 128), data + 2);
const uint64_t *data_const = data;
EXPECT_EQ(int_containing_bit(data_const, 0), data_const);
EXPECT_EQ(int_containing_bit(data_const, 1), data_const);
EXPECT_EQ(int_containing_bit(data_const, 63), data_const);
EXPECT_EQ(int_containing_bit(data_const, 64), data_const + 1);
EXPECT_EQ(int_containing_bit(data_const, 65), data_const + 1);
EXPECT_EQ(int_containing_bit(data_const, 100), data_const + 1);
EXPECT_EQ(int_containing_bit(data_const, 127), data_const + 1);
EXPECT_EQ(int_containing_bit(data_const, 128), data_const + 2);
}
TEST(bit_ref, Test)
{
uint64_t data = (1 << 3) | (1 << 7);
EXPECT_FALSE(BitRef(&data, 0).test());
EXPECT_FALSE(BitRef(&data, 1).test());
EXPECT_FALSE(BitRef(&data, 2).test());
EXPECT_TRUE(BitRef(&data, 3).test());
EXPECT_FALSE(BitRef(&data, 4));
EXPECT_FALSE(BitRef(&data, 5));
EXPECT_FALSE(BitRef(&data, 6));
EXPECT_TRUE(BitRef(&data, 7));
EXPECT_FALSE(MutableBitRef(&data, 0).test());
EXPECT_FALSE(MutableBitRef(&data, 1).test());
EXPECT_FALSE(MutableBitRef(&data, 2).test());
EXPECT_TRUE(MutableBitRef(&data, 3).test());
EXPECT_FALSE(MutableBitRef(&data, 4));
EXPECT_FALSE(MutableBitRef(&data, 5));
EXPECT_FALSE(MutableBitRef(&data, 6));
EXPECT_TRUE(MutableBitRef(&data, 7));
}
TEST(bit_ref, Set)
{
uint64_t data = 0;
MutableBitRef(&data, 0).set();
MutableBitRef(&data, 1).set();
MutableBitRef(&data, 1).set();
MutableBitRef(&data, 4).set();
EXPECT_EQ(data, (1 << 0) | (1 << 1) | (1 << 4));
MutableBitRef(&data, 5).set(true);
MutableBitRef(&data, 1).set(false);
EXPECT_EQ(data, (1 << 0) | (1 << 4) | (1 << 5));
}
TEST(bit_ref, Reset)
{
uint64_t data = -1;
MutableBitRef(&data, 0).reset();
MutableBitRef(&data, 2).reset();
EXPECT_EQ(data, uint64_t(-1) & ~(1 << 0) & ~(1 << 2));
}
TEST(bit_ref, SetBranchless)
{
uint64_t data = 0;
MutableBitRef(&data, 0).set_branchless(true);
EXPECT_EQ(data, 1);
MutableBitRef(&data, 0).set_branchless(false);
EXPECT_EQ(data, 0);
MutableBitRef(&data, 3).set_branchless(false);
MutableBitRef(&data, 4).set_branchless(true);
EXPECT_EQ(data, 16);
MutableBitRef(&data, 3).set_branchless(true);
MutableBitRef(&data, 4).set_branchless(true);
EXPECT_EQ(data, 24);
}
TEST(bit_ref, Cast)
{
uint64_t data = 0;
MutableBitRef mutable_ref(&data, 3);
BitRef ref = mutable_ref;
EXPECT_FALSE(ref);
mutable_ref.set();
EXPECT_TRUE(ref);
}
TEST(bit_ref, MaskRangeBits)
{
EXPECT_EQ(mask_range_bits(0, 0),
0b0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000);
EXPECT_EQ(mask_range_bits(0, 1),
0b0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0001);
EXPECT_EQ(mask_range_bits(0, 5),
0b0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0001'1111);
EXPECT_EQ(mask_range_bits(64, 0),
0b0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000);
EXPECT_EQ(mask_range_bits(63, 1),
0b1000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000);
EXPECT_EQ(mask_range_bits(59, 5),
0b1111'1000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000);
EXPECT_EQ(mask_range_bits(8, 3),
0b0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0111'0000'0000);
EXPECT_EQ(mask_range_bits(0, 64),
0b1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111);
}
} // namespace blender::bits::tests
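For reference, a hedged sketch of helpers that would satisfy the expectations above (these are not the Blender definitions; note the explicit n >= 64 case, since shifting a 64-bit value by 64 bits is undefined behavior):
#include <cstdint>
static uint64_t mask_first_n_bits_sketch(const int64_t n)
{
  return (n >= 64) ? ~uint64_t(0) : ((uint64_t(1) << n) - 1);
}
static uint64_t mask_last_n_bits_sketch(const int64_t n)
{
  return ~mask_first_n_bits_sketch(64 - n);
}
static uint64_t mask_range_bits_sketch(const int64_t start, const int64_t size)
{
  return (size == 0) ? 0 : (mask_first_n_bits_sketch(size) << start);
}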

@ -1,139 +0,0 @@
/* SPDX-License-Identifier: Apache-2.0 */
#include <array>
#include "BLI_bit_span.hh"
#include "testing/testing.h"
namespace blender::bits::tests {
TEST(bit_span, DefaultConstructor)
{
{
char buffer[sizeof(BitSpan)];
memset(buffer, 0xff, sizeof(BitSpan));
BitSpan &span = *new (buffer) BitSpan();
EXPECT_TRUE(span.is_empty());
EXPECT_EQ(span.size(), 0);
}
{
char buffer[sizeof(MutableBitSpan)];
memset(buffer, 0xff, sizeof(MutableBitSpan));
MutableBitSpan &span = *new (buffer) MutableBitSpan();
EXPECT_TRUE(span.is_empty());
EXPECT_EQ(span.size(), 0);
}
}
TEST(bit_span, Iteration)
{
uint64_t data = (1 << 2) | (1 << 3);
const BitSpan span(&data, 30);
EXPECT_EQ(span.size(), 30);
int index = 0;
for (const BitRef bit : span) {
EXPECT_EQ(bit.test(), ELEM(index, 2, 3));
index++;
}
}
TEST(bit_span, MutableIteration)
{
uint64_t data = 0;
MutableBitSpan span(&data, 40);
EXPECT_EQ(span.size(), 40);
int index = 0;
for (MutableBitRef bit : span) {
bit.set(index % 4 == 0);
index++;
}
EXPECT_EQ(data,
0b0000'0000'0000'0000'0000'0000'0001'0001'0001'0001'0001'0001'0001'0001'0001'0001);
}
TEST(bit_span, SubscriptOperator)
{
uint64_t data[2] = {0, 0};
MutableBitSpan mutable_span(data, 128);
BitSpan span = mutable_span;
EXPECT_EQ(mutable_span.data(), data);
EXPECT_EQ(mutable_span.bit_range(), IndexRange(128));
EXPECT_EQ(span.data(), data);
EXPECT_EQ(span.bit_range(), IndexRange(128));
EXPECT_FALSE(mutable_span[5].test());
EXPECT_FALSE(span[5].test());
mutable_span[5].set(5);
EXPECT_TRUE(mutable_span[5].test());
EXPECT_TRUE(span[5].test());
EXPECT_FALSE(mutable_span[120].test());
EXPECT_FALSE(span[120].test());
mutable_span[120].set(120);
EXPECT_TRUE(mutable_span[120].test());
EXPECT_TRUE(span[120].test());
EXPECT_EQ(data[0],
0b0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0010'0000);
EXPECT_EQ(data[1],
0b0000'0001'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000);
}
TEST(bit_span, RangeConstructor)
{
uint64_t data = 0;
MutableBitSpan mutable_span(&data, IndexRange(4, 3));
BitSpan span = mutable_span;
EXPECT_FALSE(mutable_span[1].test());
EXPECT_FALSE(span[1].test());
mutable_span[0].set(true);
mutable_span[1].set(true);
mutable_span[2].set(true);
mutable_span[0].set(false);
mutable_span[2].set(false);
EXPECT_TRUE(mutable_span[1].test());
EXPECT_TRUE(span[1].test());
EXPECT_EQ(data,
0b0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0010'0000);
}
TEST(bit_span, Set)
{
uint64_t data = 0;
MutableBitSpan(&data, 64).set_all(true);
EXPECT_EQ(data, uint64_t(-1));
MutableBitSpan(&data, 64).set_all(false);
EXPECT_EQ(data, uint64_t(0));
MutableBitSpan(&data, IndexRange(4, 8)).set_all(true);
EXPECT_EQ(data,
0b0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'1111'1111'0000);
MutableBitSpan(&data, IndexRange(8, 30)).set_all(false);
EXPECT_EQ(data,
0b0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'1111'0000);
}
TEST(bit_span, SetSliced)
{
std::array<uint64_t, 10> data;
memset(data.data(), 0, sizeof(data));
MutableBitSpan span{data.data(), 640};
span.slice(IndexRange(5, 500)).set_all(true);
for (const int64_t i : IndexRange(640)) {
EXPECT_EQ(span[i], i >= 5 && i < 505);
}
span.slice(IndexRange(10, 190)).set_all(false);
for (const int64_t i : IndexRange(640)) {
EXPECT_EQ(span[i], (i >= 5 && i < 10) || (i >= 200 && i < 505));
}
}
} // namespace blender::bits::tests

@ -6,7 +6,7 @@
#include "testing/testing.h"
namespace blender::bits::tests {
namespace blender::tests {
TEST(bit_vector, DefaultConstructor)
{
@ -183,4 +183,4 @@ TEST(bit_vector, AppendMany)
EXPECT_TRUE(vec[5]);
}
} // namespace blender::bits::tests
} // namespace blender::tests

@ -290,24 +290,6 @@ TEST(index_range, SplitByAlignment)
EXPECT_EQ(ranges.aligned, IndexRange());
EXPECT_EQ(ranges.suffix, IndexRange());
}
{
AlignedIndexRanges ranges = split_index_range_by_alignment(IndexRange(64), 64);
EXPECT_EQ(ranges.prefix, IndexRange());
EXPECT_EQ(ranges.aligned, IndexRange(64));
EXPECT_EQ(ranges.suffix, IndexRange());
}
{
AlignedIndexRanges ranges = split_index_range_by_alignment(IndexRange(64, 64), 64);
EXPECT_EQ(ranges.prefix, IndexRange());
EXPECT_EQ(ranges.aligned, IndexRange(64, 64));
EXPECT_EQ(ranges.suffix, IndexRange());
}
{
AlignedIndexRanges ranges = split_index_range_by_alignment(IndexRange(4, 8), 64);
EXPECT_EQ(ranges.prefix, IndexRange(4, 8));
EXPECT_EQ(ranges.aligned, IndexRange());
EXPECT_EQ(ranges.suffix, IndexRange());
}
}
} // namespace blender::tests
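A hedged stand-alone sketch of the prefix/aligned/suffix decomposition exercised above (the struct and function names are hypothetical, not the Blender helper itself):
#include <cstdint>
struct SplitSketch {
  int64_t prefix_start, prefix_size;   /* unaligned head, handled per element */
  int64_t aligned_start, aligned_size; /* whole aligned blocks */
  int64_t suffix_start, suffix_size;   /* unaligned tail */
};
static SplitSketch split_by_alignment_sketch(const int64_t start,
                                             const int64_t size,
                                             const int64_t alignment)
{
  const int64_t end = start + size;
  const int64_t mask = alignment - 1; /* alignment must be a power of two */
  int64_t first_aligned = (start + mask) & ~mask;
  if (first_aligned > end) {
    first_aligned = end; /* the whole range fits in the prefix */
  }
  int64_t last_aligned = end & ~mask;
  if (last_aligned < first_aligned) {
    last_aligned = first_aligned; /* no whole aligned block in the range */
  }
  return {start, first_aligned - start,
          first_aligned, last_aligned - first_aligned,
          last_aligned, end - last_aligned};
}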

@ -211,7 +211,7 @@ void *BMO_iter_as_arrayN(BMOpSlot slot_args[BMO_OP_MAX_SLOTS],
int BM_iter_mesh_bitmap_from_filter(const char itype,
BMesh *bm,
blender::MutableBitSpan bitmap,
blender::BitVector<> &bitmap,
bool (*test_fn)(BMElem *, void *user_data),
void *user_data)
{
@ -234,7 +234,7 @@ int BM_iter_mesh_bitmap_from_filter(const char itype,
}
int BM_iter_mesh_bitmap_from_filter_tessface(BMesh *bm,
blender::MutableBitSpan bitmap,
blender::BitVector<> &bitmap,
bool (*test_fn)(BMFace *, void *user_data),
void *user_data)
{

@ -21,7 +21,7 @@
#include "BLI_mempool.h"
#ifdef __cplusplus
# include "BLI_bit_span.hh"
# include "BLI_bit_vector.hh"
#endif
#ifdef __cplusplus
@ -228,14 +228,14 @@ void *BMO_iter_as_arrayN(BMOpSlot slot_args[BMO_OP_MAX_SLOTS],
int BM_iter_mesh_bitmap_from_filter(char itype,
BMesh *bm,
blender::MutableBitSpan bitmap,
blender::BitVector<> &bitmap,
bool (*test_fn)(BMElem *, void *user_data),
void *user_data);
/**
* Needed when we want to check faces, but return a loop aligned array.
*/
int BM_iter_mesh_bitmap_from_filter_tessface(BMesh *bm,
blender::MutableBitSpan bitmap,
blender::BitVector<> &bitmap,
bool (*test_fn)(BMFace *, void *user_data),
void *user_data);

@ -557,7 +557,7 @@ static void extract_edituv_fdots_iter_poly_mesh(const MeshRenderData *mr,
const bool mp_select = (efa) ? BM_elem_flag_test_bool(efa, BM_ELEM_SELECT) : false;
if (mr->use_subsurf_fdots) {
const BitSpan facedot_tags = mr->me->runtime->subsurf_face_dot_tags;
const BitVector<> &facedot_tags = mr->me->runtime->subsurf_face_dot_tags;
const MLoop *mloop = mr->mloop;
const int ml_index_end = mp->loopstart + mp->totloop;

@ -46,7 +46,7 @@ static void extract_fdots_iter_poly_mesh(const MeshRenderData *mr,
GPUIndexBufBuilder *elb = static_cast<GPUIndexBufBuilder *>(_userdata);
if (mr->use_subsurf_fdots) {
const BitSpan facedot_tags = mr->me->runtime->subsurf_face_dot_tags;
const BitVector<> &facedot_tags = mr->me->runtime->subsurf_face_dot_tags;
const MLoop *mloop = mr->mloop;
const int ml_index_end = mp->loopstart + mp->totloop;

@ -102,7 +102,7 @@ static void extract_edge_fac_iter_poly_mesh(const MeshRenderData *mr,
void *_data)
{
MeshExtract_EdgeFac_Data *data = static_cast<MeshExtract_EdgeFac_Data *>(_data);
const BitSpan optimal_display_edges = mr->me->runtime->subsurf_optimal_display_edges;
const BitVector<> &optimal_display_edges = mr->me->runtime->subsurf_optimal_display_edges;
const MLoop *mloop = mr->mloop;
const int ml_index_end = mp->loopstart + mp->totloop;

@ -76,7 +76,7 @@ static void extract_fdots_pos_iter_poly_mesh(const MeshRenderData *mr,
zero_v3(co);
const MLoop *mloop = mr->mloop;
const BitSpan facedot_tags = mr->me->runtime->subsurf_face_dot_tags;
const BitVector<> &facedot_tags = mr->me->runtime->subsurf_face_dot_tags;
const int ml_index_end = mp->loopstart + mp->totloop;
for (int ml_index = mp->loopstart; ml_index < ml_index_end; ml_index += 1) {

@ -74,7 +74,7 @@ static void extract_fdots_uv_iter_poly_mesh(const MeshRenderData *mr,
void *_data)
{
MeshExtract_FdotUV_Data *data = static_cast<MeshExtract_FdotUV_Data *>(_data);
const BitSpan facedot_tags = mr->me->runtime->subsurf_face_dot_tags;
const BitVector<> &facedot_tags = mr->me->runtime->subsurf_face_dot_tags;
const MLoop *mloop = mr->mloop;
const int ml_index_end = mp->loopstart + mp->totloop;

@ -77,7 +77,11 @@ const EnumPropertyItem *ED_asset_library_reference_to_rna_enum_itemf(const bool
if (include_generated) {
const EnumPropertyItem generated_items[] = {
{ASSET_LIBRARY_ALL, "ALL", 0, "All", "Show assets from all of the listed asset libraries"},
{ASSET_LIBRARY_ALL,
"ALL",
ICON_BLANK1,
"All",
"Show assets from all of the listed asset libraries"},
RNA_ENUM_ITEM_SEPR,
{ASSET_LIBRARY_LOCAL,
"LOCAL",
@ -86,7 +90,7 @@ const EnumPropertyItem *ED_asset_library_reference_to_rna_enum_itemf(const bool
"Show the assets currently available in this Blender session"},
{ASSET_LIBRARY_ESSENTIALS,
"ESSENTIALS",
0,
ICON_BLANK1,
"Essentials",
"Show the basic building blocks and utilities coming with Blender"},
{0, nullptr, 0, nullptr, nullptr},
@ -118,7 +122,7 @@ const EnumPropertyItem *ED_asset_library_reference_to_rna_enum_itemf(const bool
const int enum_value = ED_asset_library_reference_to_enum_value(&library_reference);
/* Use library path as description, it's a nice hint for users. */
EnumPropertyItem tmp = {
enum_value, user_library->name, ICON_NONE, user_library->name, user_library->path};
enum_value, user_library->name, ICON_BLANK1, user_library->name, user_library->path};
RNA_enum_item_add(&item, &totitem, &tmp);
}

@ -1008,60 +1008,6 @@ static void CURVES_OT_select_linked(wmOperatorType *ot)
ot->flag = OPTYPE_REGISTER | OPTYPE_UNDO;
}
static int select_more_exec(bContext *C, wmOperator * /*op*/)
{
VectorSet<Curves *> unique_curves = get_unique_editable_curves(*C);
for (Curves *curves_id : unique_curves) {
CurvesGeometry &curves = curves_id->geometry.wrap();
select_adjacent(curves, false);
/* Use #ID_RECALC_GEOMETRY instead of #ID_RECALC_SELECT because it is handled as a generic
* attribute for now. */
DEG_id_tag_update(&curves_id->id, ID_RECALC_GEOMETRY);
WM_event_add_notifier(C, NC_GEOM | ND_DATA, curves_id);
}
return OPERATOR_FINISHED;
}
static void CURVES_OT_select_more(wmOperatorType *ot)
{
ot->name = "Select More";
ot->idname = __func__;
ot->description = "Grow the selection by one point";
ot->exec = select_more_exec;
ot->poll = editable_curves_point_domain_poll;
ot->flag = OPTYPE_REGISTER | OPTYPE_UNDO;
}
static int select_less_exec(bContext *C, wmOperator * /*op*/)
{
VectorSet<Curves *> unique_curves = get_unique_editable_curves(*C);
for (Curves *curves_id : unique_curves) {
CurvesGeometry &curves = curves_id->geometry.wrap();
select_adjacent(curves, true);
/* Use #ID_RECALC_GEOMETRY instead of #ID_RECALC_SELECT because it is handled as a generic
* attribute for now. */
DEG_id_tag_update(&curves_id->id, ID_RECALC_GEOMETRY);
WM_event_add_notifier(C, NC_GEOM | ND_DATA, curves_id);
}
return OPERATOR_FINISHED;
}
static void CURVES_OT_select_less(wmOperatorType *ot)
{
ot->name = "Select Less";
ot->idname = __func__;
ot->description = "Shrink the selection by one point";
ot->exec = select_less_exec;
ot->poll = editable_curves_point_domain_poll;
ot->flag = OPTYPE_REGISTER | OPTYPE_UNDO;
}
namespace surface_set {
static bool surface_set_poll(bContext *C)
@ -1187,8 +1133,6 @@ void ED_operatortypes_curves()
WM_operatortype_append(CURVES_OT_select_random);
WM_operatortype_append(CURVES_OT_select_end);
WM_operatortype_append(CURVES_OT_select_linked);
WM_operatortype_append(CURVES_OT_select_more);
WM_operatortype_append(CURVES_OT_select_less);
WM_operatortype_append(CURVES_OT_surface_set);
WM_operatortype_append(CURVES_OT_delete);
}

@ -267,85 +267,6 @@ void select_linked(bke::CurvesGeometry &curves)
selection.finish();
}
void select_adjacent(bke::CurvesGeometry &curves, const bool deselect)
{
const OffsetIndices points_by_curve = curves.points_by_curve();
bke::GSpanAttributeWriter selection = ensure_selection_attribute(
curves, ATTR_DOMAIN_POINT, CD_PROP_BOOL);
const VArray<bool> cyclic = curves.cyclic();
if (deselect) {
invert_selection(selection.span);
}
if (selection.span.type().is<bool>()) {
MutableSpan<bool> selection_typed = selection.span.typed<bool>();
threading::parallel_for(curves.curves_range(), 256, [&](const IndexRange range) {
for (const int curve_i : range) {
const IndexRange points = points_by_curve[curve_i];
/* Handle all cases in the forward direction. */
for (int point_i = points.first(); point_i < points.last(); point_i++) {
if (!selection_typed[point_i] && selection_typed[point_i + 1]) {
selection_typed[point_i] = true;
}
}
/* Handle all cases in the backwards direction. */
for (int point_i = points.last(); point_i > points.first(); point_i--) {
if (!selection_typed[point_i] && selection_typed[point_i - 1]) {
selection_typed[point_i] = true;
}
}
/* Handle cyclic curve case. */
if (cyclic[curve_i]) {
if (selection_typed[points.first()] != selection_typed[points.last()]) {
selection_typed[points.first()] = true;
selection_typed[points.last()] = true;
}
}
}
});
}
else if (selection.span.type().is<float>()) {
MutableSpan<float> selection_typed = selection.span.typed<float>();
threading::parallel_for(curves.curves_range(), 256, [&](const IndexRange range) {
for (const int curve_i : range) {
const IndexRange points = points_by_curve[curve_i];
/* Handle all cases in the forward direction. */
for (int point_i = points.first(); point_i < points.last(); point_i++) {
if ((selection_typed[point_i] == 0.0f) && (selection_typed[point_i + 1] > 0.0f)) {
selection_typed[point_i] = 1.0f;
}
}
/* Handle all cases in the backwards direction. */
for (int point_i = points.last(); point_i > points.first(); point_i--) {
if ((selection_typed[point_i] == 0.0f) && (selection_typed[point_i - 1] > 0.0f)) {
selection_typed[point_i] = 1.0f;
}
}
/* Handle cyclic curve case. */
if (cyclic[curve_i]) {
if (selection_typed[points.first()] != selection_typed[points.last()]) {
selection_typed[points.first()] = 1.0f;
selection_typed[points.last()] = 1.0f;
}
}
}
});
}
if (deselect) {
invert_selection(selection.span);
}
selection.finish();
}
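A hedged behaviour sketch, each call applied to the same starting selection on a single non-cyclic curve with a boolean selection attribute:
/* starting point selection: [0, 0, 1, 0, 0] */
select_adjacent(curves, false); /* grow   -> [0, 1, 1, 1, 0] */
select_adjacent(curves, true);  /* shrink -> [0, 0, 0, 0, 0] (invert, grow, invert back) */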
void select_random(bke::CurvesGeometry &curves,
const eAttrDomain selection_domain,
uint32_t random_seed,

@ -135,11 +135,6 @@ void select_ends(bke::CurvesGeometry &curves, int amount, bool end_points);
*/
void select_linked(bke::CurvesGeometry &curves);
/**
* (De)select all the adjacent points of the current selected points.
*/
void select_adjacent(bke::CurvesGeometry &curves, bool deselect);
/**
* Select random points or curves.
*

@ -239,17 +239,6 @@ enum {
UI_BUT_OVERRIDDEN = 1u << 31u,
};
/** #uiBut.dragflag */
enum {
/** By default only the left part of a button triggers dragging. A questionable design that
* makes the icon, but not the rest of the button, draggable. Set this flag so the entire
* button can be dragged. */
UI_BUT_DRAG_FULL_BUT = (1 << 0),
/* --- Internal flags. --- */
UI_BUT_DRAGPOIN_FREE = (1 << 1),
};
/* Default font size for normal text. */
#define UI_DEFAULT_TEXT_POINTS 11.0f
@ -892,9 +881,6 @@ bool UI_but_flag_is_set(uiBut *but, int flag);
void UI_but_drawflag_enable(uiBut *but, int flag);
void UI_but_drawflag_disable(uiBut *but, int flag);
void UI_but_dragflag_enable(uiBut *but, int flag);
void UI_but_dragflag_disable(uiBut *but, int flag);
void UI_but_disable(uiBut *but, const char *disabled_hint);
void UI_but_type_set_menu_from_pulldown(uiBut *but);
@ -1803,12 +1789,9 @@ void UI_but_drag_set_id(uiBut *but, struct ID *id);
/**
* Set an image to display while dragging. This works for any drag type (`WM_DRAG_XXX`).
* Not to be confused with #UI_but_drag_set_image(), which sets up dragging of an image.
*
* Sets #UI_BUT_DRAG_FULL_BUT so the full button can be dragged.
*/
void UI_but_drag_attach_image(uiBut *but, struct ImBuf *imb, float scale);
/**
* Sets #UI_BUT_DRAG_FULL_BUT so the full button can be dragged.
* \param asset: May be passed from a temporary variable, drag data only stores a copy of this.
*/
void UI_but_drag_set_asset(uiBut *but,
@ -1825,8 +1808,6 @@ void UI_but_drag_set_name(uiBut *but, const char *name);
* Value from button itself.
*/
void UI_but_drag_set_value(uiBut *but);
/** Sets #UI_BUT_DRAG_FULL_BUT so the full button can be dragged. */
void UI_but_drag_set_image(
uiBut *but, const char *path, int icon, struct ImBuf *imb, float scale, bool use_free);

@ -4224,10 +4224,6 @@ static uiBut *ui_def_but(uiBlock *block,
but->flag |= UI_BUT_UNDO;
}
if (ELEM(but->type, UI_BTYPE_COLOR)) {
but->dragflag |= UI_BUT_DRAG_FULL_BUT;
}
BLI_addtail(&block->buttons, but);
if (block->curlayout) {
@ -4291,7 +4287,6 @@ static void ui_def_but_rna__menu(bContext * /*C*/, uiLayout *layout, void *but_p
int totitems = 0;
int categories = 0;
int entries_nosepr_count = 0;
bool has_item_with_icon = false;
for (const EnumPropertyItem *item = item_array; item->identifier; item++, totitems++) {
if (!item->identifier[0]) {
/* inconsistent, but menus with categories do not look good flipped */
@ -4303,9 +4298,6 @@ static void ui_def_but_rna__menu(bContext * /*C*/, uiLayout *layout, void *but_p
/* We do not want simple separators in `entries_nosepr_count`. */
continue;
}
if (item->icon) {
has_item_with_icon = true;
}
entries_nosepr_count++;
}
@ -4410,18 +4402,11 @@ static void ui_def_but_rna__menu(bContext * /*C*/, uiLayout *layout, void *but_p
uiItemS(column);
}
else {
int icon = item->icon;
/* Use a blank icon if this item has none but some other item does, so that labels stay
* aligned. */
if (icon == ICON_NONE && has_item_with_icon) {
icon = ICON_BLANK1;
}
if (icon) {
if (item->icon) {
uiDefIconTextButI(block,
UI_BTYPE_BUT_MENU,
B_NOP,
icon,
item->icon,
item->name,
0,
0,
@ -4600,7 +4585,15 @@ static uiBut *ui_def_but_rna(uiBlock *block,
#endif
}
icon = item[i].icon;
/* #ICON_BLANK1 can be used to add padding the size of an icon. This is fine for aligning
* multiple items within a menu, but not for the menu button itself, which then only shows
* the label. */
if ((type == UI_BTYPE_MENU) && (item[i].icon == ICON_BLANK1)) {
icon = ICON_NONE;
}
else {
icon = item[i].icon;
}
}
else {
if (!str) {
@ -5894,16 +5887,6 @@ void UI_but_drawflag_disable(uiBut *but, int flag)
but->drawflag &= ~flag;
}
void UI_but_dragflag_enable(uiBut *but, int flag)
{
but->dragflag |= flag;
}
void UI_but_dragflag_disable(uiBut *but, int flag)
{
but->dragflag &= ~flag;
}
void UI_but_disable(uiBut *but, const char *disabled_hint)
{
UI_but_flag_enable(but, UI_BUT_DISABLED);

@ -24,7 +24,6 @@ void UI_but_drag_attach_image(uiBut *but, struct ImBuf *imb, const float scale)
{
but->imb = imb;
but->imb_scale = scale;
UI_but_dragflag_enable(but, UI_BUT_DRAG_FULL_BUT);
}
void UI_but_drag_set_asset(uiBut *but,

@ -86,6 +86,11 @@ enum {
/* WARNING: rest of #uiBut.flag in UI_interface.h */
};
/** #uiBut.dragflag */
enum {
UI_BUT_DRAGPOIN_FREE = (1 << 0),
};
/** #uiBut.pie_dir */
enum RadialDirection {
UI_RADIAL_NONE = -1,

@ -247,7 +247,7 @@ bool ui_but_contains_point_px_icon(const uiBut *but, ARegion *region, const wmEv
BLI_rcti_rctf_copy(&rect, &but->rect);
if (but->dragflag & UI_BUT_DRAG_FULL_BUT) {
if (but->imb || but->type == UI_BTYPE_COLOR) {
/* use button size itself */
}
else if (but->drawflag & UI_BUT_ICON_LEFT) {

@ -92,9 +92,15 @@ void attribute_search_add_items(StringRefNull str,
StringSearch *search = BLI_string_search_new();
for (const GeometryAttributeInfo *item : infos) {
/* Don't show the legacy "normal" attribute. */
if (item->name == "normal" && item->domain == ATTR_DOMAIN_FACE) {
continue;
}
if (!bke::allow_procedural_attribute_access(item->name)) {
continue;
}
BLI_string_search_add(search, item->name.c_str(), (void *)item, 0);
}

@ -83,7 +83,7 @@ bool SCULPT_is_automasking_mode_enabled(const Sculpt *sd,
bool SCULPT_is_automasking_enabled(const Sculpt *sd, const SculptSession *ss, const Brush *br)
{
if (ss && br && SCULPT_stroke_is_dynamic_topology(ss, br)) {
if (br && SCULPT_stroke_is_dynamic_topology(ss, br)) {
return false;
}
if (SCULPT_is_automasking_mode_enabled(sd, br, BRUSH_AUTOMASKING_TOPOLOGY)) {

@ -1571,13 +1571,8 @@ static int sculpt_cloth_filter_invoke(bContext *C, wmOperator *op, const wmEvent
SCULPT_stroke_id_next(ob);
SCULPT_undo_push_begin(ob, op);
SCULPT_filter_cache_init(C,
ob,
sd,
SCULPT_UNDO_COORDS,
event->mval,
RNA_float_get(op->ptr, "area_normal_radius"),
RNA_float_get(op->ptr, "strength"));
SCULPT_filter_cache_init(
C, ob, sd, SCULPT_UNDO_COORDS, event->mval, RNA_float_get(op->ptr, "area_normal_radius"));
ss->filter_cache->automasking = SCULPT_automasking_cache_init(sd, nullptr, ob);

@ -347,13 +347,8 @@ static int sculpt_color_filter_invoke(bContext *C, wmOperator *op, const wmEvent
return OPERATOR_CANCELLED;
}
SCULPT_filter_cache_init(C,
ob,
sd,
SCULPT_UNDO_COLOR,
event->mval,
RNA_float_get(op->ptr, "area_normal_radius"),
RNA_float_get(op->ptr, "strength"));
SCULPT_filter_cache_init(
C, ob, sd, SCULPT_UNDO_COLOR, event->mval, RNA_float_get(op->ptr, "area_normal_radius"));
FilterCache *filter_cache = ss->filter_cache;
filter_cache->active_face_set = SCULPT_FACE_SET_NONE;
filter_cache->automasking = SCULPT_automasking_cache_init(sd, nullptr, ob);

@ -8,9 +8,7 @@
#include "MEM_guardedalloc.h"
#include "BLI_hash.h"
#include "BLI_index_range.hh"
#include "BLI_math.h"
#include "BLI_math_vector_types.hh"
#include "BLI_task.h"
#include "DNA_meshdata_types.h"
@ -25,7 +23,6 @@
#include "WM_api.h"
#include "WM_types.h"
#include "ED_screen.h"
#include "ED_view3d.h"
#include "paint_intern.h"
@ -33,20 +30,14 @@
#include "RNA_access.h"
#include "RNA_define.h"
#include "RNA_prototypes.h"
#include "UI_interface.h"
#include "UI_resources.h"
#include "bmesh.h"
#include <cmath>
#include <cstdlib>
using blender::float2;
using blender::float3;
using blender::IndexRange;
void SCULPT_filter_to_orientation_space(float r_v[3], FilterCache *filter_cache)
{
switch (filter_cache->orientation) {
@ -105,14 +96,13 @@ void SCULPT_filter_cache_init(bContext *C,
Sculpt *sd,
const int undo_type,
const int mval[2],
float area_normal_radius,
float start_strength)
float area_normal_radius)
{
SculptSession *ss = ob->sculpt;
PBVH *pbvh = ob->sculpt->pbvh;
ss->filter_cache = MEM_cnew<FilterCache>(__func__);
ss->filter_cache->start_filter_strength = start_strength;
ss->filter_cache->random_seed = rand();
if (undo_type == SCULPT_UNDO_COLOR) {
@ -348,15 +338,6 @@ static bool sculpt_mesh_filter_needs_pmap(eSculptMeshFilterType filter_type)
MESH_FILTER_SHARPEN);
}
static bool sculpt_mesh_filter_is_continuous(eSculptMeshFilterType type)
{
return (ELEM(type,
MESH_FILTER_SHARPEN,
MESH_FILTER_SMOOTH,
MESH_FILTER_RELAX,
MESH_FILTER_RELAX_FACE_SETS));
}
static void mesh_filter_task_cb(void *__restrict userdata,
const int i,
const TaskParallelTLS *__restrict /*tls*/)
@ -398,8 +379,7 @@ static void mesh_filter_task_cb(void *__restrict userdata,
continue;
}
if (ELEM(filter_type, MESH_FILTER_RELAX, MESH_FILTER_RELAX_FACE_SETS) ||
ss->filter_cache->no_orig_co) {
if (ELEM(filter_type, MESH_FILTER_RELAX, MESH_FILTER_RELAX_FACE_SETS)) {
copy_v3_v3(orig_co, vd.co);
}
else {
@ -701,16 +681,34 @@ static void mesh_filter_surface_smooth_displace_task_cb(void *__restrict userdat
BKE_pbvh_vertex_iter_end;
}
static void sculpt_mesh_filter_apply(bContext *C, wmOperator *op)
static int sculpt_mesh_filter_modal(bContext *C, wmOperator *op, const wmEvent *event)
{
Object *ob = CTX_data_active_object(C);
Depsgraph *depsgraph = CTX_data_depsgraph_pointer(C);
SculptSession *ss = ob->sculpt;
Sculpt *sd = CTX_data_tool_settings(C)->sculpt;
eSculptMeshFilterType filter_type = eSculptMeshFilterType(RNA_enum_get(op->ptr, "type"));
float filter_strength = RNA_float_get(op->ptr, "strength");
if (event->type == LEFTMOUSE && event->val == KM_RELEASE) {
SCULPT_filter_cache_free(ss);
SCULPT_undo_push_end(ob);
SCULPT_flush_update_done(C, ob, SCULPT_UPDATE_COORDS);
return OPERATOR_FINISHED;
}
if (event->type != MOUSEMOVE) {
return OPERATOR_RUNNING_MODAL;
}
const float len = event->prev_press_xy[0] - event->xy[0];
filter_strength = filter_strength * -len * 0.001f * UI_DPI_FAC;
SCULPT_vertex_random_access_ensure(ss);
bool needs_pmap = sculpt_mesh_filter_needs_pmap(filter_type);
BKE_sculpt_update_object_for_edit(depsgraph, ob, needs_pmap, false, false);
SculptThreadedTaskData data{};
data.sd = sd;
data.ob = ob;
@ -742,136 +740,22 @@ static void sculpt_mesh_filter_apply(bContext *C, wmOperator *op)
}
SCULPT_flush_update_step(C, SCULPT_UPDATE_COORDS);
}
static void sculpt_mesh_update_strength(wmOperator *op,
SculptSession *ss,
float2 prev_press_mouse,
float2 mouse)
{
const float len = prev_press_mouse[0] - mouse[0];
float filter_strength = ss->filter_cache->start_filter_strength * -len * 0.001f * UI_DPI_FAC;
RNA_float_set(op->ptr, "strength", filter_strength);
}
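A hedged worked example of the mapping above, assuming UI_DPI_FAC == 1.0 and a starting strength of 1.0:
/* Dragging 200 px to the right of the press position gives
 * len == prev_press_mouse[0] - mouse[0] == -200, so the stored strength becomes
 * 1.0f * -(-200) * 0.001f * 1.0f == 0.2f; dragging left flips the sign. */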
static void sculpt_mesh_filter_apply_with_history(bContext *C, wmOperator *op)
{
/* Event history is only stored for smooth and relax filters. */
if (!RNA_collection_length(op->ptr, "event_history")) {
sculpt_mesh_filter_apply(C, op);
return;
}
Object *ob = CTX_data_active_object(C);
SculptSession *ss = ob->sculpt;
float2 start_mouse;
bool first = true;
float initial_strength = ss->filter_cache->start_filter_strength;
RNA_BEGIN (op->ptr, item, "event_history") {
float2 mouse;
RNA_float_get_array(&item, "mouse_event", mouse);
if (first) {
first = false;
start_mouse = mouse;
continue;
}
sculpt_mesh_update_strength(op, ss, start_mouse, mouse);
sculpt_mesh_filter_apply(C, op);
}
RNA_END;
RNA_float_set(op->ptr, "strength", initial_strength);
}
static void sculpt_mesh_filter_end(bContext *C, wmOperator * /*op*/)
{
Object *ob = CTX_data_active_object(C);
SculptSession *ss = ob->sculpt;
SCULPT_filter_cache_free(ss);
SCULPT_undo_push_end(ob);
SCULPT_flush_update_done(C, ob, SCULPT_UPDATE_COORDS);
}
static int sculpt_mesh_filter_modal(bContext *C, wmOperator *op, const wmEvent *event)
{
Object *ob = CTX_data_active_object(C);
Depsgraph *depsgraph = CTX_data_depsgraph_pointer(C);
SculptSession *ss = ob->sculpt;
const eSculptMeshFilterType filter_type = eSculptMeshFilterType(RNA_enum_get(op->ptr, "type"));
if (event->type == LEFTMOUSE && event->val == KM_RELEASE) {
float initial_strength = ss->filter_cache->start_filter_strength;
sculpt_mesh_filter_end(C, op);
/* Don't update strength property if we're storing an event history. */
if (sculpt_mesh_filter_is_continuous(filter_type)) {
RNA_float_set(op->ptr, "strength", initial_strength);
}
return OPERATOR_FINISHED;
}
if (event->type != MOUSEMOVE) {
return OPERATOR_RUNNING_MODAL;
}
/* NOTE: Some filter types are continuous; for these we store an
* event history in RNA.
* This way the user can tweak the last operator properties
* or repeat the operator and get the expected results. */
if (sculpt_mesh_filter_is_continuous(filter_type)) {
if (RNA_collection_length(op->ptr, "event_history") == 0) {
/* First entry is the start mouse position, event->prev_press_xy. */
PointerRNA startptr;
RNA_collection_add(op->ptr, "event_history", &startptr);
float2 mouse_start(float(event->prev_press_xy[0]), float(event->prev_press_xy[1]));
RNA_float_set_array(&startptr, "mouse_event", mouse_start);
}
PointerRNA itemptr;
RNA_collection_add(op->ptr, "event_history", &itemptr);
float2 mouse(float(event->xy[0]), float(event->xy[1]));
RNA_float_set_array(&itemptr, "mouse_event", mouse);
RNA_float_set(&itemptr, "pressure", WM_event_tablet_data(event, nullptr, nullptr));
}
float2 prev_mval(float(event->prev_press_xy[0]), float(event->prev_press_xy[1]));
float2 mval(float(event->xy[0]), float(event->xy[1]));
sculpt_mesh_update_strength(op, ss, prev_mval, mval);
bool needs_pmap = sculpt_mesh_filter_needs_pmap(filter_type);
BKE_sculpt_update_object_for_edit(depsgraph, ob, needs_pmap, false, false);
sculpt_mesh_filter_apply(C, op);
return OPERATOR_RUNNING_MODAL;
}
/* Returns OPERATOR_PASS_THROUGH on success. */
static int sculpt_mesh_filter_start(bContext *C, wmOperator *op)
static int sculpt_mesh_filter_invoke(bContext *C, wmOperator *op, const wmEvent *event)
{
Object *ob = CTX_data_active_object(C);
Depsgraph *depsgraph = CTX_data_depsgraph_pointer(C);
Sculpt *sd = CTX_data_tool_settings(C)->sculpt;
int mval[2];
RNA_int_get_array(op->ptr, "start_mouse", mval);
const eSculptMeshFilterType filter_type = eSculptMeshFilterType(RNA_enum_get(op->ptr, "type"));
const bool use_automasking = SCULPT_is_automasking_enabled(sd, nullptr, nullptr);
const bool needs_topology_info = sculpt_mesh_filter_needs_pmap(filter_type) || use_automasking;
BKE_sculpt_update_object_for_edit(depsgraph, ob, needs_topology_info, false, false);
SculptSession *ss = ob->sculpt;
const eMeshFilterDeformAxis deform_axis = eMeshFilterDeformAxis(
RNA_enum_get(op->ptr, "deform_axis"));
const eSculptMeshFilterType filter_type = eSculptMeshFilterType(RNA_enum_get(op->ptr, "type"));
const bool use_automasking = SCULPT_is_automasking_enabled(sd, ss, nullptr);
const bool needs_topology_info = sculpt_mesh_filter_needs_pmap(filter_type) || use_automasking;
if (deform_axis == 0) {
/* All axis are disabled, so the filter is not going to produce any deformation. */
@ -884,25 +768,21 @@ static int sculpt_mesh_filter_start(bContext *C, wmOperator *op)
/* Update the active face set manually as the paint cursor is not enabled when using the Mesh
* Filter Tool. */
float mval_fl[2] = {float(mval[0]), float(mval[1])};
float mval_fl[2] = {float(event->mval[0]), float(event->mval[1])};
SculptCursorGeometryInfo sgi;
SCULPT_cursor_geometry_info_update(C, &sgi, mval_fl, false);
}
SCULPT_vertex_random_access_ensure(ss);
BKE_sculpt_update_object_for_edit(depsgraph, ob, needs_topology_info, false, false);
if (needs_topology_info) {
SCULPT_boundary_info_ensure(ob);
}
SCULPT_undo_push_begin(ob, op);
SCULPT_filter_cache_init(C,
ob,
sd,
SCULPT_UNDO_COORDS,
mval,
RNA_float_get(op->ptr, "area_normal_radius"),
RNA_float_get(op->ptr, "strength"));
SCULPT_filter_cache_init(
C, ob, sd, SCULPT_UNDO_COORDS, event->mval, RNA_float_get(op->ptr, "area_normal_radius"));
FilterCache *filter_cache = ss->filter_cache;
filter_cache->active_face_set = SCULPT_FACE_SET_NONE;
@ -946,56 +826,12 @@ static int sculpt_mesh_filter_start(bContext *C, wmOperator *op)
RNA_enum_get(op->ptr, "orientation"));
ss->filter_cache->orientation = orientation;
return OPERATOR_PASS_THROUGH;
}
static int sculpt_mesh_filter_invoke(bContext *C, wmOperator *op, const wmEvent *event)
{
RNA_int_set_array(op->ptr, "start_mouse", event->mval);
int ret = sculpt_mesh_filter_start(C, op);
if (ret == OPERATOR_PASS_THROUGH) {
WM_event_add_modal_handler(C, op);
return OPERATOR_RUNNING_MODAL;
}
return ret;
}
static int sculpt_mesh_filter_exec(bContext *C, wmOperator *op)
{
int ret = sculpt_mesh_filter_start(C, op);
if (ret == OPERATOR_PASS_THROUGH) {
Object *ob = CTX_data_active_object(C);
SculptSession *ss = ob->sculpt;
int iterations = RNA_int_get(op->ptr, "iteration_count");
bool has_history = RNA_collection_length(op->ptr, "event_history") > 0;
if (!has_history) {
ss->filter_cache->no_orig_co = true;
}
for (int i = 0; i < iterations; i++) {
sculpt_mesh_filter_apply_with_history(C, op);
ss->filter_cache->no_orig_co = true;
}
sculpt_mesh_filter_end(C, op);
return OPERATOR_FINISHED;
}
return ret;
WM_event_add_modal_handler(C, op);
return OPERATOR_RUNNING_MODAL;
}
void SCULPT_mesh_filter_properties(wmOperatorType *ot)
{
RNA_def_int_array(
ot->srna, "start_mouse", 2, nullptr, 0, 1 << 14, "Starting Mouse", "", 0, 1 << 14);
RNA_def_float(
ot->srna,
"area_normal_radius",
@ -1008,31 +844,6 @@ void SCULPT_mesh_filter_properties(wmOperatorType *ot)
1.0);
RNA_def_float(
ot->srna, "strength", 1.0f, -10.0f, 10.0f, "Strength", "Filter strength", -10.0f, 10.0f);
RNA_def_int(ot->srna,
"iteration_count",
1,
1,
10000,
"Repeat",
"How many times to repeat the filter",
1,
100);
/* Smooth filter requires entire event history. */
PropertyRNA *prop = RNA_def_collection_runtime(
ot->srna, "event_history", &RNA_OperatorStrokeElement, "", "");
RNA_def_property_flag(prop, PropertyFlag(int(PROP_HIDDEN) | int(PROP_SKIP_SAVE)));
}
static void sculpt_mesh_ui_exec(bContext * /*C*/, wmOperator *op)
{
uiLayout *layout = op->layout;
uiItemR(layout, op->ptr, "strength", 0, nullptr, ICON_NONE);
uiItemR(layout, op->ptr, "iteration_count", 0, nullptr, ICON_NONE);
uiItemR(layout, op->ptr, "orientation", 0, nullptr, ICON_NONE);
layout = uiLayoutRow(layout, true);
uiItemR(layout, op->ptr, "deform_axis", UI_ITEM_R_EXPAND, nullptr, ICON_NONE);
}
void SCULPT_OT_mesh_filter(wmOperatorType *ot)
@ -1046,8 +857,6 @@ void SCULPT_OT_mesh_filter(wmOperatorType *ot)
ot->invoke = sculpt_mesh_filter_invoke;
ot->modal = sculpt_mesh_filter_modal;
ot->poll = SCULPT_mode_poll;
ot->exec = sculpt_mesh_filter_exec;
ot->ui = sculpt_mesh_ui_exec;
ot->flag = OPTYPE_REGISTER | OPTYPE_UNDO;

@ -494,8 +494,6 @@ struct FilterCache {
float (*pre_smoothed_color)[4];
ViewContext vc;
float start_filter_strength;
bool no_orig_co;
};
/**
@ -1429,8 +1427,7 @@ void SCULPT_filter_cache_init(bContext *C,
Sculpt *sd,
int undo_type,
const int mval[2],
float area_normal_radius,
float start_strength);
float area_normal_radius);
void SCULPT_filter_cache_free(SculptSession *ss);
void SCULPT_mesh_filter_properties(wmOperatorType *ot);

@ -229,8 +229,6 @@ static int sculpt_symmetrize_exec(bContext *C, wmOperator *op)
return OPERATOR_CANCELLED;
}
SCULPT_topology_islands_invalidate(ss);
/* Redraw. */
SCULPT_pbvh_clear(ob);
WM_event_add_notifier(C, NC_OBJECT | ND_DRAW, ob);

@ -58,7 +58,7 @@ void ED_sculpt_init_transform(bContext *C, Object *ob, const int mval[2], const
SCULPT_vertex_random_access_ensure(ss);
SCULPT_filter_cache_init(C, ob, sd, SCULPT_UNDO_COORDS, mval, 5.0, 1.0f);
SCULPT_filter_cache_init(C, ob, sd, SCULPT_UNDO_COORDS, mval, 5.0);
if (sd->transform_mode == SCULPT_TRANSFORM_MODE_RADIUS_ELASTIC) {
ss->filter_cache->transform_displacement_mode = SCULPT_TRANSFORM_DISPLACEMENT_INCREMENTAL;

@ -590,7 +590,7 @@ void ED_animedit_unlink_action(
if (nlt->strips.first == NULL) {
BLI_assert(nstrip == NULL);
BKE_nlatrack_remove_and_free(&adt->nla_tracks, nlt, true);
BKE_nlatrack_free(&adt->nla_tracks, nlt, true);
}
}
}

@ -400,21 +400,6 @@ static void saction_channel_region_message_subscribe(const wmRegionMessageSubscr
}
}
static void action_clamp_scroll(ARegion *region)
{
View2D *v2d = &region->v2d;
const float cur_height_y = BLI_rctf_size_y(&v2d->cur);
if (BLI_rctf_size_y(&v2d->cur) > BLI_rctf_size_y(&v2d->tot)) {
v2d->cur.ymin = -cur_height_y;
v2d->cur.ymax = 0;
}
else if (v2d->cur.ymin < v2d->tot.ymin) {
v2d->cur.ymin = v2d->tot.ymin;
v2d->cur.ymax = v2d->cur.ymin + cur_height_y;
}
}
static void action_main_region_listener(const wmRegionListenerParams *params)
{
ARegion *region = params->region;
@ -875,13 +860,6 @@ static void action_blend_write(BlendWriter *writer, SpaceLink *sl)
BLO_write_struct(writer, SpaceAction, sl);
}
static void action_main_region_view2d_changed(const bContext *UNUSED(C), ARegion *region)
{
/* V2D_KEEPTOT_STRICT cannot be used to clamp scrolling
* because it also clamps the x-axis to 0. */
action_clamp_scroll(region);
}
void ED_spacetype_action(void)
{
SpaceType *st = MEM_callocN(sizeof(SpaceType), "spacetype action");
@ -914,7 +892,6 @@ void ED_spacetype_action(void)
art->draw_overlay = action_main_region_draw_overlay;
art->listener = action_main_region_listener;
art->message_subscribe = saction_main_region_message_subscribe;
art->on_view2d_changed = action_main_region_view2d_changed;
art->keymapflag = ED_KEYMAP_GIZMO | ED_KEYMAP_VIEW2D | ED_KEYMAP_ANIMATION | ED_KEYMAP_FRAMES;
BLI_addhead(&st->regiontypes, art);

@ -128,55 +128,16 @@ static void draw_tile_background(const rcti *draw_rect, int colorid, int shade)
UI_draw_roundbox_aa(&draw_rect_fl, true, 5.0f, color);
}
static void file_but_enable_drag(uiBut *but,
const SpaceFile *sfile,
const FileDirEntry *file,
const char *path,
ImBuf *preview_image,
int icon,
float scale)
{
ID *id;
if ((id = filelist_file_get_id(file))) {
UI_but_drag_set_id(but, id);
if (preview_image) {
UI_but_drag_attach_image(but, preview_image, scale);
}
}
else if (sfile->browse_mode == FILE_BROWSE_MODE_ASSETS &&
(file->typeflag & FILE_TYPE_ASSET) != 0) {
char blend_path[FILE_MAX_LIBEXTRA];
if (BLO_library_path_explode(path, blend_path, NULL, NULL)) {
const int import_method = ED_fileselect_asset_import_method_get(sfile, file);
BLI_assert(import_method > -1);
UI_but_drag_set_asset(but,
&(AssetHandle){.file_data = file},
BLI_strdup(blend_path),
import_method,
icon,
preview_image,
scale);
}
}
else if (preview_image) {
UI_but_drag_set_image(but, BLI_strdup(path), icon, preview_image, scale, true);
}
else {
/* The path is no longer static, so it cannot be passed directly to the button. */
UI_but_drag_set_path(but, BLI_strdup(path), true);
}
}
static uiBut *file_add_icon_but(const SpaceFile *sfile,
uiBlock *block,
const char *path,
const rcti *tile_draw_rect,
int icon,
int width,
int height,
bool dimmed)
static void file_draw_icon(const SpaceFile *sfile,
uiBlock *block,
const FileDirEntry *file,
const char *path,
const rcti *tile_draw_rect,
int icon,
int width,
int height,
bool drag,
bool dimmed)
{
uiBut *but;
@ -190,7 +151,42 @@ static uiBut *file_add_icon_but(const SpaceFile *sfile,
block, UI_BTYPE_LABEL, 0, icon, x, y, width, height, NULL, 0.0f, 0.0f, a1, a2, NULL);
UI_but_func_tooltip_set(but, file_draw_tooltip_func, BLI_strdup(path), MEM_freeN);
return but;
if (drag) {
/* TODO: duplicated from file_draw_preview(). */
ID *id;
if ((id = filelist_file_get_id(file))) {
UI_but_drag_set_id(but, id);
ImBuf *preview_image = filelist_file_getimage(file);
if (preview_image) {
UI_but_drag_attach_image(but, preview_image, UI_DPI_FAC);
}
}
else if (sfile->browse_mode == FILE_BROWSE_MODE_ASSETS &&
(file->typeflag & FILE_TYPE_ASSET) != 0) {
ImBuf *preview_image = filelist_file_getimage(file);
char blend_path[FILE_MAX_LIBEXTRA];
if (BLO_library_path_explode(path, blend_path, NULL, NULL)) {
const FileAssetSelectParams *asset_params = ED_fileselect_get_asset_params(sfile);
BLI_assert(asset_params != NULL);
const int import_method = ED_fileselect_asset_import_method_get(sfile, file);
BLI_assert(import_method > -1);
UI_but_drag_set_asset(but,
&(AssetHandle){.file_data = file},
BLI_strdup(blend_path),
import_method,
icon,
preview_image,
UI_DPI_FAC);
}
}
else {
/* The path is no longer static, so it cannot be passed directly to the button. */
UI_but_drag_set_path(but, BLI_strdup(path), true);
}
}
}
static void file_draw_string(int sx,
@ -301,50 +297,21 @@ void file_calc_previews(const bContext *C, ARegion *region)
UI_view2d_totRect_set(v2d, sfile->layout->width, sfile->layout->height);
}
static void file_add_preview_drag_but(const SpaceFile *sfile,
uiBlock *block,
FileLayout *layout,
const FileDirEntry *file,
const char *path,
const rcti *tile_draw_rect,
ImBuf *preview_image,
const int icon,
const float scale)
{
/* Invisible button for dragging. */
rcti drag_rect = *tile_draw_rect;
/* A bit smaller than the full tile, to increase the gap between the drag regions of
* neighboring items, where users can start a box select. */
BLI_rcti_pad(&drag_rect, -layout->tile_border_x, -layout->tile_border_y);
uiBut *but = uiDefBut(block,
UI_BTYPE_LABEL,
0,
"",
drag_rect.xmin,
drag_rect.ymin,
BLI_rcti_size_x(&drag_rect),
BLI_rcti_size_y(&drag_rect),
NULL,
0.0,
0.0,
0,
0,
NULL);
file_but_enable_drag(but, sfile, file, path, preview_image, icon, scale);
}
static void file_draw_preview(const FileDirEntry *file,
static void file_draw_preview(const SpaceFile *sfile,
uiBlock *block,
const FileDirEntry *file,
const char *path,
const rcti *tile_draw_rect,
const float icon_aspect,
ImBuf *imb,
const int icon,
FileLayout *layout,
const bool is_icon,
const bool drag,
const bool dimmed,
const bool is_link,
float *r_scale)
const bool is_link)
{
uiBut *but;
float fx, fy;
float dx, dy;
int xco, yco;
@ -561,11 +528,62 @@ static void file_draw_preview(const FileDirEntry *file,
immUnbindProgram();
}
GPU_blend(GPU_BLEND_NONE);
/* Invisible button for dragging. */
rcti drag_rect = *tile_draw_rect;
/* A bit smaller than the full tile, to increase the gap between the drag regions of
* neighboring items, where users can start a box select. */
BLI_rcti_pad(&drag_rect, -layout->tile_border_x, -layout->tile_border_y);
if (r_scale) {
*r_scale = scale;
but = uiDefBut(block,
UI_BTYPE_LABEL,
0,
"",
drag_rect.xmin,
drag_rect.ymin,
BLI_rcti_size_x(&drag_rect),
BLI_rcti_size_y(&drag_rect),
NULL,
0.0,
0.0,
0,
0,
NULL);
/* Drag-region. */
if (drag) {
ID *id;
if ((id = filelist_file_get_id(file))) {
UI_but_drag_set_id(but, id);
UI_but_drag_attach_image(but, imb, scale);
}
/* The path is no longer static, so it cannot be passed directly to the button. */
else if (sfile->browse_mode == FILE_BROWSE_MODE_ASSETS &&
(file->typeflag & FILE_TYPE_ASSET) != 0) {
char blend_path[FILE_MAX_LIBEXTRA];
if (BLO_library_path_explode(path, blend_path, NULL, NULL)) {
const FileAssetSelectParams *asset_params = ED_fileselect_get_asset_params(sfile);
BLI_assert(asset_params != NULL);
const int import_method = ED_fileselect_asset_import_method_get(sfile, file);
BLI_assert(import_method > -1);
UI_but_drag_set_asset(but,
&(AssetHandle){.file_data = file},
BLI_strdup(blend_path),
import_method,
icon,
imb,
scale);
}
}
else {
UI_but_drag_set_image(but, BLI_strdup(path), icon, imb, scale, true);
}
}
GPU_blend(GPU_BLEND_NONE);
}
static void renamebutton_cb(bContext *C, void *UNUSED(arg1), char *oldname)
@ -1018,36 +1036,31 @@ void file_draw_list(const bContext *C, ARegion *region)
is_icon = 1;
}
float scale = 0;
file_draw_preview(file,
file_draw_preview(sfile,
block,
file,
path,
&tile_draw_rect,
thumb_icon_aspect,
imb,
icon,
layout,
is_icon,
do_drag,
is_hidden,
is_link,
/* Returns the scale which is needed below. */
&scale);
if (do_drag) {
file_add_preview_drag_but(
sfile, block, layout, file, path, &tile_draw_rect, imb, icon, scale);
}
is_link);
}
else {
const int icon = filelist_geticon(files, i, true);
uiBut *icon_but = file_add_icon_but(sfile,
block,
path,
&tile_draw_rect,
icon,
ICON_DEFAULT_WIDTH_SCALE,
ICON_DEFAULT_HEIGHT_SCALE,
is_hidden);
if (do_drag) {
file_but_enable_drag(icon_but, sfile, file, path, NULL, icon, UI_DPI_FAC);
}
file_draw_icon(sfile,
block,
file,
path,
&tile_draw_rect,
filelist_geticon(files, i, true),
ICON_DEFAULT_WIDTH_SCALE,
ICON_DEFAULT_HEIGHT_SCALE,
do_drag,
is_hidden);
icon_ofs += ICON_DEFAULT_WIDTH_SCALE + 0.2f * UI_UNIT_X;
}

@ -736,7 +736,7 @@ static int nlaedit_delete_tracks_exec(bContext *C, wmOperator *UNUSED(op))
}
/* call delete on this track - deletes all strips too */
BKE_nlatrack_remove_and_free(&adt->nla_tracks, nlt, true);
BKE_nlatrack_free(&adt->nla_tracks, nlt, true);
ale->update = ANIM_UPDATE_DEPS;
}
}

@ -60,7 +60,7 @@ bool space_node_view_flag(
int tot = 0;
bool has_frame = false;
if (snode.edittree) {
for (const bNode *node : snode.edittree->all_nodes()) {
LISTBASE_FOREACH (const bNode *, node, &snode.edittree->nodes) {
if ((node->flag & node_flag) == node_flag) {
BLI_rctf_union(&cur_new, &node->runtime->totr);
tot++;

@ -839,6 +839,7 @@ static OrderedAttributes gather_generic_mesh_attributes_to_propagate(
options.propagation_info,
attributes_to_propagate);
attributes_to_propagate.remove("position");
attributes_to_propagate.remove("normal");
attributes_to_propagate.remove("shade_smooth");
r_create_id = attributes_to_propagate.pop_try("id").has_value();
r_create_material_index = attributes_to_propagate.pop_try("material_index").has_value();
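/* "id" and "material_index" are popped from the generic set; the r_create_* flags record for
 * the caller whether they were present. */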

@ -640,10 +640,6 @@ void VKShader::compute_shader_from_glsl(MutableSpan<const char *> sources)
build_shader_module(sources, shaderc_compute_shader, &compute_module_);
}
void VKShader::warm_cache(int /*limit*/)
{
}
bool VKShader::finalize(const shader::ShaderCreateInfo *info)
{
if (compilation_failed_) {

@ -35,7 +35,7 @@ class VKShader : public Shader {
void fragment_shader_from_glsl(MutableSpan<const char *> sources) override;
void compute_shader_from_glsl(MutableSpan<const char *> sources) override;
bool finalize(const shader::ShaderCreateInfo *info = nullptr) override;
void warm_cache(int limit) override;
void warm_cache(int limit) override {}
void transform_feedback_names_set(Span<const char *> name_list,
eGPUShaderTFBType geom_type) override;

@ -593,7 +593,7 @@ static void rna_NlaTrack_remove(
return;
}
BKE_nlatrack_remove_and_free(&adt->nla_tracks, track, true);
BKE_nlatrack_free(&adt->nla_tracks, track, true);
RNA_POINTER_INVALIDATE(track_ptr);
WM_event_add_notifier(C, NC_ANIMATION | ND_NLA | NA_REMOVED, NULL);

@ -8,13 +8,13 @@
/* Macros to help reduce code clutter in rna_mesh.c */
/* Define the accessors for a basic CustomDataLayer collection, skipping anonymous layers */
/* Define the accessors for a basic CustomDataLayer collection */
#define DEFINE_CUSTOMDATA_LAYER_COLLECTION(collection_name, customdata_type, layer_type) \
/* check */ \
static int rna_##collection_name##_check(CollectionPropertyIterator *UNUSED(iter), void *data) \
{ \
CustomDataLayer *layer = (CustomDataLayer *)data; \
return (layer->anonymous_id != NULL || layer->type != layer_type); \
return (layer->type != layer_type); \
} \
/* begin */ \
static void rna_Mesh_##collection_name##s_begin(CollectionPropertyIterator *iter, \
@ -37,9 +37,7 @@
static int rna_Mesh_##collection_name##s_length(PointerRNA *ptr) \
{ \
CustomData *data = rna_mesh_##customdata_type(ptr); \
return data ? CustomData_number_of_layers(data, layer_type) - \
CustomData_number_of_anonymous_layers(data, layer_type) : \
0; \
return data ? CustomData_number_of_layers(data, layer_type) : 0; \
} \
/* index range */ \
static void rna_Mesh_##collection_name##_index_range( \
@ -47,9 +45,7 @@
{ \
CustomData *data = rna_mesh_##customdata_type(ptr); \
*min = 0; \
*max = data ? CustomData_number_of_layers(data, layer_type) - \
CustomData_number_of_anonymous_layers(data, layer_type) - 1 : \
0; \
*max = data ? CustomData_number_of_layers(data, layer_type) - 1 : 0; \
*max = MAX2(0, *max); \
}
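/* Illustration only (not part of this change): a hypothetical invocation of the macro above.
 * The "vertcol" collection name and the layer type are example values chosen here, not taken
 * from rna_mesh.c. */
/* DEFINE_CUSTOMDATA_LAYER_COLLECTION(vertcol, ldata, CD_PROP_BYTE_COLOR) */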

@ -64,6 +64,18 @@ struct IndexAttributes {
/** \name Utility Functions
* \{ */
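/* Collect all attributes to propagate for the given component type, excluding "id", which is
 * handled separately by the stable-id copy functions. */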
static Map<AttributeIDRef, AttributeKind> gather_attributes_without_id(
const GeometrySet &geometry_set,
const GeometryComponentType component_type,
const AnonymousAttributePropagationInfo &propagation_info)
{
Map<AttributeIDRef, AttributeKind> attributes;
geometry_set.gather_attributes_for_propagation(
{component_type}, component_type, false, propagation_info, attributes);
attributes.remove("id");
return attributes;
}
static OffsetIndices<int> accumulate_counts_to_offsets(const IndexMask selection,
const VArray<int> &counts,
Array<int> &r_offset_data)
@ -171,25 +183,39 @@ static void copy_stable_id_point(const OffsetIndices<int> offsets,
dst_attribute.finish();
}
static void copy_attributes_without_id(const OffsetIndices<int> offsets,
static void copy_attributes_without_id(GeometrySet &geometry_set,
const GeometryComponentType component_type,
const eAttrDomain domain,
const OffsetIndices<int> offsets,
const IndexMask selection,
const AnonymousAttributePropagationInfo &propagation_info,
const eAttrDomain domain,
const bke::AttributeAccessor src_attributes,
bke::MutableAttributeAccessor dst_attributes)
{
for (auto &attribute : bke::retrieve_attributes_for_transfer(src_attributes,
dst_attributes,
ATTR_DOMAIN_AS_MASK(domain),
propagation_info,
{"id"})) {
attribute_math::convert_to_static_type(attribute.src.type(), [&](auto dummy) {
const Map<AttributeIDRef, AttributeKind> attributes = gather_attributes_without_id(
geometry_set, component_type, propagation_info);
for (const Map<AttributeIDRef, AttributeKind>::Item entry : attributes.items()) {
const AttributeIDRef attribute_id = entry.key;
GAttributeReader src_attribute = src_attributes.lookup(attribute_id);
if (!src_attribute || src_attribute.domain != domain) {
continue;
}
eAttrDomain out_domain = src_attribute.domain;
const eCustomDataType data_type = bke::cpp_type_to_custom_data_type(
src_attribute.varray.type());
GSpanAttributeWriter dst_attribute = dst_attributes.lookup_or_add_for_write_only_span(
attribute_id, out_domain, data_type);
if (!dst_attribute) {
continue;
}
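/* Each selected element's value is repeated over its range of duplicates, as described by the
 * offsets. */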
attribute_math::convert_to_static_type(data_type, [&](auto dummy) {
using T = decltype(dummy);
const Span<T> src = attribute.src.typed<T>();
MutableSpan<T> dst = attribute.dst.span.typed<T>();
VArraySpan<T> src = src_attribute.varray.typed<T>();
MutableSpan<T> dst = dst_attribute.span.typed<T>();
threaded_slice_fill<T>(offsets, selection, src, dst);
});
attribute.dst.finish();
dst_attribute.finish();
}
}
@ -204,26 +230,42 @@ static void copy_attributes_without_id(const OffsetIndices<int> offsets,
* copied with an offset fill, otherwise a mapping is used.
*/
static void copy_curve_attributes_without_id(
const GeometrySet &geometry_set,
const bke::CurvesGeometry &src_curves,
const IndexMask selection,
const OffsetIndices<int> curve_offsets,
const AnonymousAttributePropagationInfo &propagation_info,
bke::CurvesGeometry &dst_curves)
{
Map<AttributeIDRef, AttributeKind> attributes = gather_attributes_without_id(
geometry_set, GEO_COMPONENT_TYPE_CURVE, propagation_info);
const OffsetIndices src_points_by_curve = src_curves.points_by_curve();
const OffsetIndices dst_points_by_curve = dst_curves.points_by_curve();
for (auto &attribute : bke::retrieve_attributes_for_transfer(src_curves.attributes(),
dst_curves.attributes_for_write(),
ATTR_DOMAIN_MASK_ALL,
propagation_info,
{"id"})) {
attribute_math::convert_to_static_type(attribute.src.type(), [&](auto dummy) {
using T = decltype(dummy);
const Span<T> src = attribute.src.typed<T>();
MutableSpan<T> dst = attribute.dst.span.typed<T>();
for (const Map<AttributeIDRef, AttributeKind>::Item entry : attributes.items()) {
const AttributeIDRef attribute_id = entry.key;
GAttributeReader src_attribute = src_curves.attributes().lookup(attribute_id);
if (!src_attribute) {
continue;
}
switch (attribute.meta_data.domain) {
eAttrDomain out_domain = src_attribute.domain;
const eCustomDataType data_type = bke::cpp_type_to_custom_data_type(
src_attribute.varray.type());
GSpanAttributeWriter dst_attribute =
dst_curves.attributes_for_write().lookup_or_add_for_write_only_span(
attribute_id, out_domain, data_type);
if (!dst_attribute) {
continue;
}
attribute_math::convert_to_static_type(data_type, [&](auto dummy) {
using T = decltype(dummy);
VArraySpan<T> src{src_attribute.varray.typed<T>()};
MutableSpan<T> dst = dst_attribute.span.typed<T>();
switch (out_domain) {
case ATTR_DOMAIN_CURVE:
threaded_slice_fill<T>(curve_offsets, selection, src, dst);
break;
@ -239,11 +281,10 @@ static void copy_curve_attributes_without_id(
});
break;
default:
BLI_assert_unreachable();
break;
}
});
attribute.dst.finish();
dst_attribute.finish();
}
}
@ -353,7 +394,8 @@ static void duplicate_curves(GeometrySet &geometry_set,
});
all_dst_offsets.last() = dst_points_num;
copy_curve_attributes_without_id(curves, selection, curve_offsets, propagation_info, new_curves);
copy_curve_attributes_without_id(
geometry_set, curves, selection, curve_offsets, propagation_info, new_curves);
copy_stable_id_curves(curves, selection, curve_offsets, new_curves);
@ -380,6 +422,7 @@ static void duplicate_curves(GeometrySet &geometry_set,
* copied with an offset fill, otherwise a mapping is used.
*/
static void copy_face_attributes_without_id(
GeometrySet &geometry_set,
const Span<int> edge_mapping,
const Span<int> vert_mapping,
const Span<int> loop_mapping,
@ -389,14 +432,31 @@ static void copy_face_attributes_without_id(
const bke::AttributeAccessor src_attributes,
bke::MutableAttributeAccessor dst_attributes)
{
for (auto &attribute : bke::retrieve_attributes_for_transfer(
src_attributes, dst_attributes, ATTR_DOMAIN_MASK_ALL, propagation_info, {"id"})) {
attribute_math::convert_to_static_type(attribute.src.type(), [&](auto dummy) {
using T = decltype(dummy);
const Span<T> src = attribute.src.typed<T>();
MutableSpan<T> dst = attribute.dst.span.typed<T>();
Map<AttributeIDRef, AttributeKind> attributes = gather_attributes_without_id(
geometry_set, GEO_COMPONENT_TYPE_MESH, propagation_info);
switch (attribute.meta_data.domain) {
for (const Map<AttributeIDRef, AttributeKind>::Item entry : attributes.items()) {
const AttributeIDRef attribute_id = entry.key;
GAttributeReader src_attribute = src_attributes.lookup(attribute_id);
if (!src_attribute) {
continue;
}
eAttrDomain out_domain = src_attribute.domain;
const eCustomDataType data_type = bke::cpp_type_to_custom_data_type(
src_attribute.varray.type());
GSpanAttributeWriter dst_attribute = dst_attributes.lookup_or_add_for_write_only_span(
attribute_id, out_domain, data_type);
if (!dst_attribute) {
continue;
}
attribute_math::convert_to_static_type(data_type, [&](auto dummy) {
using T = decltype(dummy);
VArraySpan<T> src{src_attribute.varray.typed<T>()};
MutableSpan<T> dst = dst_attribute.span.typed<T>();
switch (out_domain) {
case ATTR_DOMAIN_POINT:
array_utils::gather(src, vert_mapping, dst);
break;
@ -410,11 +470,10 @@ static void copy_face_attributes_without_id(
array_utils::gather(src, loop_mapping, dst);
break;
default:
BLI_assert_unreachable();
break;
}
});
attribute.dst.finish();
dst_attribute.finish();
}
}
@ -548,7 +607,8 @@ static void duplicate_faces(GeometrySet &geometry_set,
new_mesh->loose_edges_tag_none();
copy_face_attributes_without_id(edge_mapping,
copy_face_attributes_without_id(geometry_set,
edge_mapping,
vert_mapping,
loop_mapping,
duplicates,
@ -586,6 +646,7 @@ static void duplicate_faces(GeometrySet &geometry_set,
* copied with an offset fill, for point domain a mapping is used.
*/
static void copy_edge_attributes_without_id(
GeometrySet &geometry_set,
const Span<int> point_mapping,
const OffsetIndices<int> offsets,
const IndexMask selection,
@ -593,18 +654,30 @@ static void copy_edge_attributes_without_id(
const bke::AttributeAccessor src_attributes,
bke::MutableAttributeAccessor dst_attributes)
{
for (auto &attribute :
bke::retrieve_attributes_for_transfer(src_attributes,
dst_attributes,
ATTR_DOMAIN_MASK_POINT | ATTR_DOMAIN_MASK_EDGE,
propagation_info,
{"id"})) {
attribute_math::convert_to_static_type(attribute.src.type(), [&](auto dummy) {
using T = decltype(dummy);
const Span<T> src = attribute.src.typed<T>();
MutableSpan<T> dst = attribute.dst.span.typed<T>();
Map<AttributeIDRef, AttributeKind> attributes = gather_attributes_without_id(
geometry_set, GEO_COMPONENT_TYPE_MESH, propagation_info);
switch (attribute.meta_data.domain) {
for (const Map<AttributeIDRef, AttributeKind>::Item entry : attributes.items()) {
const AttributeIDRef attribute_id = entry.key;
GAttributeReader src_attribute = src_attributes.lookup(attribute_id);
if (!src_attribute) {
continue;
}
const eAttrDomain out_domain = src_attribute.domain;
const eCustomDataType data_type = bke::cpp_type_to_custom_data_type(
src_attribute.varray.type());
GSpanAttributeWriter dst_attribute = dst_attributes.lookup_or_add_for_write_only_span(
attribute_id, out_domain, data_type);
if (!dst_attribute) {
continue;
}
attribute_math::convert_to_static_type(data_type, [&](auto dummy) {
using T = decltype(dummy);
VArraySpan<T> src{src_attribute.varray.typed<T>()};
MutableSpan<T> dst = dst_attribute.span.typed<T>();
switch (out_domain) {
case ATTR_DOMAIN_EDGE:
threaded_slice_fill<T>(offsets, selection, src, dst);
break;
@ -612,11 +685,10 @@ static void copy_edge_attributes_without_id(
array_utils::gather(src, point_mapping, dst);
break;
default:
BLI_assert_unreachable();
break;
}
});
attribute.dst.finish();
dst_attribute.finish();
}
}
@ -719,7 +791,8 @@ static void duplicate_edges(GeometrySet &geometry_set,
}
});
copy_edge_attributes_without_id(vert_orig_indices,
copy_edge_attributes_without_id(geometry_set,
vert_orig_indices,
duplicates,
selection,
propagation_info,
@ -782,16 +855,32 @@ static void duplicate_points_curve(GeometrySet &geometry_set,
}
new_curve_offsets.last() = dst_num;
for (auto &attribute : bke::retrieve_attributes_for_transfer(src_curves.attributes(),
new_curves.attributes_for_write(),
ATTR_DOMAIN_MASK_ALL,
propagation_info,
{"id"})) {
attribute_math::convert_to_static_type(attribute.src.type(), [&](auto dummy) {
Map<AttributeIDRef, AttributeKind> attributes = gather_attributes_without_id(
geometry_set, GEO_COMPONENT_TYPE_CURVE, propagation_info);
for (const Map<AttributeIDRef, AttributeKind>::Item entry : attributes.items()) {
const AttributeIDRef attribute_id = entry.key;
GAttributeReader src_attribute = src_curves.attributes().lookup(attribute_id);
if (!src_attribute) {
continue;
}
eAttrDomain domain = src_attribute.domain;
const eCustomDataType data_type = bke::cpp_type_to_custom_data_type(
src_attribute.varray.type());
GSpanAttributeWriter dst_attribute =
new_curves.attributes_for_write().lookup_or_add_for_write_only_span(
attribute_id, domain, data_type);
if (!dst_attribute) {
continue;
}
attribute_math::convert_to_static_type(data_type, [&](auto dummy) {
using T = decltype(dummy);
const Span<T> src = attribute.src.typed<T>();
MutableSpan<T> dst = attribute.dst.span.typed<T>();
switch (attribute.meta_data.domain) {
VArraySpan<T> src{src_attribute.varray.typed<T>()};
MutableSpan<T> dst = dst_attribute.span.typed<T>();
switch (domain) {
case ATTR_DOMAIN_CURVE:
threading::parallel_for(selection.index_range(), 512, [&](IndexRange range) {
for (const int i_selection : range) {
@ -804,11 +893,10 @@ static void duplicate_points_curve(GeometrySet &geometry_set,
threaded_slice_fill(duplicates, selection, src, dst);
break;
default:
BLI_assert_unreachable();
break;
}
});
attribute.dst.finish();
dst_attribute.finish();
}
copy_stable_id_point(duplicates, src_curves.attributes(), new_curves.attributes_for_write());
@ -852,10 +940,12 @@ static void duplicate_points_mesh(GeometrySet &geometry_set,
Mesh *new_mesh = BKE_mesh_new_nomain(duplicates.total_size(), 0, 0, 0, 0);
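/* The duplicated points become a vertex-only mesh: one vertex per duplicate, no edges or faces. */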
copy_attributes_without_id(duplicates,
copy_attributes_without_id(geometry_set,
GEO_COMPONENT_TYPE_MESH,
ATTR_DOMAIN_POINT,
duplicates,
selection,
propagation_info,
ATTR_DOMAIN_POINT,
mesh.attributes(),
new_mesh->attributes_for_write());
@ -900,10 +990,12 @@ static void duplicate_points_pointcloud(GeometrySet &geometry_set,
PointCloud *pointcloud = BKE_pointcloud_new_nomain(duplicates.total_size());
copy_attributes_without_id(duplicates,
copy_attributes_without_id(geometry_set,
GEO_COMPONENT_TYPE_POINT_CLOUD,
ATTR_DOMAIN_POINT,
duplicates,
selection,
propagation_info,
ATTR_DOMAIN_POINT,
src_points.attributes(),
pointcloud->attributes_for_write());
@ -1011,10 +1103,12 @@ static void duplicate_instances(GeometrySet &geometry_set,
dst_instances->reference_handles().slice(range).fill(new_handle);
}
copy_attributes_without_id(duplicates,
copy_attributes_without_id(geometry_set,
GEO_COMPONENT_TYPE_INSTANCES,
ATTR_DOMAIN_INSTANCE,
duplicates,
selection,
propagation_info,
ATTR_DOMAIN_INSTANCE,
src_instances.attributes(),
dst_instances->attributes_for_write());

@ -471,6 +471,9 @@ static void extrude_mesh_edges(Mesh &mesh,
}
GSpanAttributeWriter attribute = attributes.lookup_or_add_for_write_span(
id, meta_data.domain, meta_data.data_type);
if (!attribute) {
return true; /* Impossible to write the "normal" attribute. */
}
attribute_math::convert_to_static_type(meta_data.data_type, [&](auto dummy) {
using T = decltype(dummy);
@ -864,6 +867,9 @@ static void extrude_mesh_face_regions(Mesh &mesh,
}
GSpanAttributeWriter attribute = attributes.lookup_or_add_for_write_span(
id, meta_data.domain, meta_data.data_type);
if (!attribute) {
return true; /* Impossible to write the "normal" attribute. */
}
attribute_math::convert_to_static_type(meta_data.data_type, [&](auto dummy) {
using T = decltype(dummy);
@ -1152,6 +1158,9 @@ static void extrude_individual_mesh_faces(Mesh &mesh,
}
GSpanAttributeWriter attribute = attributes.lookup_or_add_for_write_span(
id, meta_data.domain, meta_data.data_type);
if (!attribute) {
return true; /* Impossible to write the "normal" attribute. */
}
attribute_math::convert_to_static_type(meta_data.data_type, [&](auto dummy) {
using T = decltype(dummy);

@ -78,14 +78,16 @@ bool node_group_poll_instance(const bNode *node,
const bNodeTree *nodetree,
const char **disabled_hint)
{
if (!node->typeinfo->poll(node->typeinfo, nodetree, disabled_hint)) {
return false;
if (node->typeinfo->poll(node->typeinfo, nodetree, disabled_hint)) {
const bNodeTree *grouptree = (const bNodeTree *)node->id;
if (grouptree) {
return nodeGroupPoll(nodetree, grouptree, disabled_hint);
}
return true; /* Without a linked node tree, the group node is always valid. */
}
const bNodeTree *grouptree = reinterpret_cast<const bNodeTree *>(node->id);
if (!grouptree) {
return true;
}
return nodeGroupPoll(nodetree, grouptree, disabled_hint);
return false;
}
bool nodeGroupPoll(const bNodeTree *nodetree,
@ -112,9 +114,10 @@ bool nodeGroupPoll(const bNodeTree *nodetree,
return false;
}
for (const bNode *node : grouptree->all_nodes()) {
LISTBASE_FOREACH (const bNode *, node, &grouptree->nodes) {
if (node->typeinfo->poll_instance &&
!node->typeinfo->poll_instance(node, nodetree, r_disabled_hint)) {
!node->typeinfo->poll_instance(
const_cast<bNode *>(node), const_cast<bNodeTree *>(nodetree), r_disabled_hint)) {
return false;
}
}