Add floating point atomics

Li-Ta Lo 2021-03-11 08:19:51 -07:00
parent 91d13bdfb2
commit b590a8ebb2
6 changed files with 185 additions and 5 deletions

@@ -225,6 +225,44 @@ VTKM_EXEC_CONT inline bool AtomicCompareExchangeImpl(T* addr,
    return false;
  }
}
#if __CUDA_ARCH__ < 200
// Native atomicAdd on Float32 requires compute capability 2.0, so emulate it
// with an atomicCAS loop over the value's bit pattern. (Named AtomicAddImpl to
// match the other backends and the AtomicAdd dispatch; atomicCAS takes a
// non-volatile pointer, and the unsigned bit-cast intrinsics avoid
// signed/unsigned mixing.)
VTKM_EXEC_CONT inline vtkm::Float32 AtomicAddImpl(vtkm::Float32* address,
                                                  vtkm::Float32 value,
                                                  vtkm::MemoryOrder order)
{
  AtomicStoreFence(order);
  vtkm::UInt32 assumed;
  vtkm::UInt32 old = __float_as_uint(*address);
  do
  {
    assumed = old;
    old = atomicCAS(reinterpret_cast<vtkm::UInt32*>(address),
                    assumed,
                    __float_as_uint(__uint_as_float(assumed) + value));
  } while (assumed != old);
  AtomicLoadFence(order);
  return __uint_as_float(old);
}
#endif
#if __CUDA_ARCH__ < 600
// Native atomicAdd on Float64 requires compute capability 6.0, so emulate it
// the same way. (Also renamed to AtomicAddImpl for consistency with the
// Float32 overload above and the other backends.)
VTKM_EXEC_CONT inline vtkm::Float64 AtomicAddImpl(vtkm::Float64* address,
                                                  vtkm::Float64 value,
                                                  vtkm::MemoryOrder order)
{
  AtomicStoreFence(order);
  vtkm::UInt64 assumed;
  vtkm::UInt64 old = __double_as_longlong(*address);
  do
  {
    assumed = old;
    old = atomicCAS(reinterpret_cast<vtkm::UInt64*>(address),
                    assumed,
                    __double_as_longlong(__longlong_as_double(assumed) + value));
  } while (assumed != old);
  AtomicLoadFence(order);
  return __longlong_as_double(old);
}
#endif
}
} // namespace vtkm::detail
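For context, the loop above is the standard emulation of a floating point atomicAdd on hardware without native support. A self-contained CUDA sketch of the same pattern outside VTK-m (the kernel and helper names here are illustrative, not part of the commit):

#include <cstdio>

__device__ float EmulatedAtomicAdd(float* address, float value)
{
  unsigned int assumed;
  unsigned int old = __float_as_uint(*address);
  do
  {
    assumed = old;
    // atomicCAS writes the new bits only if *address still holds "assumed";
    // it returns the prior contents either way.
    old = atomicCAS(reinterpret_cast<unsigned int*>(address),
                    assumed,
                    __float_as_uint(__uint_as_float(assumed) + value));
  } while (assumed != old);
  return __uint_as_float(old); // the value before this thread's add
}

__global__ void AddOne(float* sum)
{
  EmulatedAtomicAdd(sum, 1.0f);
}

int main()
{
  float* sum;
  cudaMallocManaged(&sum, sizeof(float));
  *sum = 0.0f;
  AddOne<<<32, 256>>>(sum);
  cudaDeviceSynchronize();
  std::printf("%f\n", *sum); // expect 8192 (32 blocks * 256 threads)
  cudaFree(sum);
  return 0;
}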
@@ -532,6 +570,39 @@ VTKM_ATOMIC_OPS_FOR_TYPE(vtkm::UInt32, LONG, )
VTKM_ATOMIC_OPS_FOR_TYPE(vtkm::UInt64, LONG64, 64)
#undef VTKM_ATOMIC_OPS_FOR_TYPE
VTKM_EXEC_CONT inline vtkm::Float32 AtomicAddImpl(vtkm::Float32* address,
                                                  vtkm::Float32 value,
                                                  vtkm::MemoryOrder vtkmNotUsed(order))
{
  LONG assumed;
  LONG old = BitCast<LONG>(*address);
  do
  {
    assumed = old;
    // _InterlockedCompareExchange takes (destination, exchange, comparand)
    // and returns the value initially at destination.
    old = _InterlockedCompareExchange(reinterpret_cast<volatile LONG*>(address),
                                      BitCast<LONG>(BitCast<vtkm::Float32>(assumed) + value),
                                      assumed);
  } while (assumed != old);
  return BitCast<vtkm::Float32>(old);
}

VTKM_EXEC_CONT inline vtkm::Float64 AtomicAddImpl(vtkm::Float64* address,
                                                  vtkm::Float64 value,
                                                  vtkm::MemoryOrder vtkmNotUsed(order))
{
  LONG64 assumed;
  LONG64 old = BitCast<LONG64>(*address);
  do
  {
    assumed = old;
    old = _InterlockedCompareExchange64(reinterpret_cast<volatile LONG64*>(address),
                                        BitCast<LONG64>(BitCast<vtkm::Float64>(assumed) + value),
                                        assumed);
  } while (assumed != old);
  return BitCast<vtkm::Float64>(old);
}
}
} // namespace vtkm::detail
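One subtlety worth flagging: _InterlockedCompareExchange takes (destination, exchange, comparand), the reverse argument order of CUDA's atomicCAS(address, compare, val). A minimal host-only sketch of the same loop (the helper name is hypothetical; assumes MSVC and <intrin.h>):

#include <intrin.h>

#include <cstring>

// Hypothetical standalone equivalent of the Float32 loop above.
inline float InterlockedAddFloat(volatile long* dest, float value)
{
  long assumed;
  long old = *dest;
  do
  {
    assumed = old;
    float f;
    std::memcpy(&f, &assumed, sizeof(f)); // stand-in for VTK-m's BitCast
    f += value;
    long desired;
    std::memcpy(&desired, &f, sizeof(desired));
    // Exchange comes before comparand; returns the prior contents of *dest.
    old = _InterlockedCompareExchange(dest, desired, assumed);
  } while (assumed != old);
  float result;
  std::memcpy(&result, &old, sizeof(result));
  return result; // the value before the add
}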
@@ -585,6 +656,52 @@ VTKM_EXEC_CONT inline T AtomicAddImpl(T* addr, T arg, vtkm::MemoryOrder order)
  return __atomic_fetch_add(addr, arg, GccAtomicMemOrder(order));
}
#include <vtkmstd/bit_cast.h>
// TODO: Use enable_if to write one version for both Float32 and Float64.
VTKM_EXEC_CONT inline vtkm::Float32 AtomicAddImpl(vtkm::Float32* addr,
                                                  vtkm::Float32 arg,
                                                  vtkm::MemoryOrder order)
{
  vtkm::UInt32 expected = vtkmstd::bit_cast<vtkm::UInt32>(*addr);
  vtkm::UInt32 desired;
  do
  {
    desired = vtkmstd::bit_cast<vtkm::UInt32>(vtkmstd::bit_cast<vtkm::Float32>(expected) + arg);
  } while (
    !__atomic_compare_exchange_n(reinterpret_cast<vtkm::UInt32*>(addr),
                                 &expected, // on failure, reloaded with the current value of *addr
                                 desired,
                                 false,
                                 GccAtomicMemOrder(order),
                                 GccAtomicMemOrder(order)));
  // Return the "old" value that was in the memory.
  return vtkmstd::bit_cast<vtkm::Float32>(expected);
}
// TODO: Use enable_if to write one version for both Float32 and Float64.
VTKM_EXEC_CONT inline vtkm::Float64 AtomicAddImpl(vtkm::Float64* addr,
                                                  vtkm::Float64 arg,
                                                  vtkm::MemoryOrder order)
{
  vtkm::UInt64 expected = vtkmstd::bit_cast<vtkm::UInt64>(*addr);
  vtkm::UInt64 desired;
  do
  {
    desired = vtkmstd::bit_cast<vtkm::UInt64>(vtkmstd::bit_cast<vtkm::Float64>(expected) + arg);
  } while (
    !__atomic_compare_exchange_n(reinterpret_cast<vtkm::UInt64*>(addr),
                                 &expected, // on failure, reloaded with the current value of *addr
                                 desired,
                                 false,
                                 GccAtomicMemOrder(order),
                                 GccAtomicMemOrder(order)));
  // Return the "old" value that was in the memory.
  return vtkmstd::bit_cast<vtkm::Float64>(expected);
}
template <typename T>
VTKM_EXEC_CONT inline T AtomicAndImpl(T* addr, T mask, vtkm::MemoryOrder order)
{
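The TODO above could be resolved with a single overload dispatched on std::is_floating_point. One possible shape, sketched here under the assumption that a bit-width helper like the hypothetical AtomicBits alias is acceptable (not part of the commit; requires <type_traits>):

// Sketch: one CAS loop for both Float32 and Float64.
template <typename T>
using AtomicBits =
  typename std::conditional<sizeof(T) == 4, vtkm::UInt32, vtkm::UInt64>::type;

template <typename T,
          typename std::enable_if<std::is_floating_point<T>::value>::type* = nullptr>
VTKM_EXEC_CONT inline T AtomicAddImpl(T* addr, T arg, vtkm::MemoryOrder order)
{
  using Bits = AtomicBits<T>;
  Bits expected = vtkmstd::bit_cast<Bits>(*addr);
  Bits desired;
  do
  {
    desired = vtkmstd::bit_cast<Bits>(vtkmstd::bit_cast<T>(expected) + arg);
  } while (!__atomic_compare_exchange_n(reinterpret_cast<Bits*>(addr),
                                        &expected, // on failure, holds the current *addr
                                        desired,
                                        false,
                                        GccAtomicMemOrder(order),
                                        GccAtomicMemOrder(order)));
  return vtkmstd::bit_cast<T>(expected);
}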
@@ -706,7 +823,7 @@ VTKM_EXEC_CONT inline T AtomicAdd(
{
  return detail::AtomicAddImpl(pointer, operand, order);
}
-template <typename T>
+template <typename T, typename std::enable_if<std::is_integral<T>::value>::type* = nullptr>
VTKM_EXEC_CONT inline T AtomicAdd(
  T* pointer,
  detail::OppositeSign<T> operand,
@@ -846,7 +963,7 @@ VTKM_EXEC_CONT inline T AtomicNot(
/// pointing to an object on the stack).
///
template <typename T>
-VTKM_EXEC_CONT inline T AtomicCompareExchange(
+VTKM_EXEC_CONT inline bool AtomicCompareExchange(
  T* shared,
  T* expected,
  T desired,

@@ -26,7 +26,8 @@ namespace cont
/// \brief A type list containing types that can be used with an AtomicArray.
///
/// @cond NONE
-using AtomicArrayTypeList = vtkm::List<vtkm::UInt32, vtkm::Int32, vtkm::UInt64, vtkm::Int64>;
+using AtomicArrayTypeList =
+  vtkm::List<vtkm::UInt32, vtkm::Int32, vtkm::UInt64, vtkm::Int64, vtkm::Float32, vtkm::Float64>;
struct VTKM_DEPRECATED(1.6,
                       "AtomicArrayTypeListTag replaced by AtomicArrayTypeList. Note that the "

@@ -89,7 +89,7 @@ void TestCheckAtomicArray()
                   "Check for 32-bit int failed.");
  VTKM_TEST_ASSERT((TypeCheck<TypeCheckTagAtomicArray, Int64Array>::value),
                   "Check for 64-bit int failed.");
-  VTKM_TEST_ASSERT(!(TypeCheck<TypeCheckTagAtomicArray, FloatArray>::value),
+  VTKM_TEST_ASSERT((TypeCheck<TypeCheckTagAtomicArray, FloatArray>::value),
                   "Check for float failed.");
}

@@ -49,6 +49,32 @@ struct MakeUnsigned<vtkm::Int64>
{
  using type = vtkm::UInt64;
};
// Allow floats to be bit-cast to unsigned integers of the same width (used
// by the compare-exchange path).
template <>
struct MakeUnsigned<vtkm::Float32>
{
  using type = vtkm::UInt32;
};
template <>
struct MakeUnsigned<vtkm::Float64>
{
  using type = vtkm::UInt64;
};

// ArithType selects the type used for the actual arithmetic in Add: the
// unsigned equivalent for integers (where overflow is well defined), but the
// floating point type itself for Float32/Float64.
template <typename T>
struct ArithType
{
  using type = typename MakeUnsigned<T>::type;
};
template <>
struct ArithType<vtkm::Float32>
{
  using type = vtkm::Float32;
};
template <>
struct ArithType<vtkm::Float64>
{
  using type = vtkm::Float64;
};
}

template <typename T, typename... MaybeDevice>
@@ -168,7 +194,7 @@ public:
  // This is safe, since the only difference between signed/unsigned types
  // is how overflow works, and signed overflow is already undefined. We also
  // document that overflow is undefined for this operation.
-  using APIType = typename detail::MakeUnsigned<ValueType>::type;
+  using APIType = typename detail::ArithType<ValueType>::type;
  return static_cast<T>(
    vtkm::AtomicAdd(reinterpret_cast<APIType*>(this->Data + index), static_cast<APIType>(value)));
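With Float32/Float64 now in AtomicArrayTypeList and ArithType selecting the float type for arithmetic, a worklet can accumulate directly into a floating point AtomicArray. A usage sketch (the worklet and function names are hypothetical; assumes the usual VTK-m invoker pattern):

#include <vtkm/cont/ArrayHandle.h>
#include <vtkm/cont/Invoker.h>
#include <vtkm/worklet/WorkletMapField.h>

// Hypothetical worklet: every input value is atomically added to slot 0.
struct AccumulateWorklet : vtkm::worklet::WorkletMapField
{
  using ControlSignature = void(FieldIn values, AtomicArrayInOut sum);
  using ExecutionSignature = void(_1, _2);

  template <typename AtomicArrayType>
  VTKM_EXEC void operator()(vtkm::Float32 value, const AtomicArrayType& sum) const
  {
    sum.Add(0, value); // relies on the new floating point AtomicAdd
  }
};

void Accumulate(const vtkm::cont::ArrayHandle<vtkm::Float32>& values,
                vtkm::cont::ArrayHandle<vtkm::Float32>& sum)
{
  vtkm::cont::Invoker invoke;
  invoke(AccumulateWorklet{}, values, sum);
}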

@@ -10,6 +10,7 @@
set(headers
  aligned_union.h
  bit_cast.h
  integer_sequence.h
  is_trivial.h
  void_t.h

vtkmstd/bit_cast.h (new file, 35 lines)

@@ -0,0 +1,35 @@
//============================================================================
// Copyright (c) Kitware, Inc.
// All rights reserved.
// See LICENSE.txt for details.
//
// This software is distributed WITHOUT ANY WARRANTY; without even
// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the above copyright notice for more information.
//============================================================================
#ifndef vtk_m_std_bit_cast_h
#define vtk_m_std_bit_cast_h
#include <cstring>
#include <type_traits>
namespace vtkmstd
{
// Copy/Paste from cppreference.com
template <class To, class From>
typename std::enable_if<sizeof(To) == sizeof(From) && std::is_trivially_copyable<From>::value &&
                          std::is_trivially_copyable<To>::value,
                        To>::type
// constexpr support needs compiler magic
bit_cast(const From& src) noexcept
{
  static_assert(
    std::is_trivially_constructible<To>::value,
    "This implementation additionally requires destination type to be trivially constructible");
  To dst;
  std::memcpy(&dst, &src, sizeof(To));
  return dst;
}
} // namespace vtkmstd

#endif // vtk_m_std_bit_cast_h
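For illustration, vtkmstd::bit_cast reinterprets an object's bytes as another same-size type without the aliasing problems of a pointer cast; a small usage sketch (values chosen for illustration):

#include <cstdint>
#include <cstdio>

#include <vtkmstd/bit_cast.h>

int main()
{
  float f = 1.0f;
  // Reinterpret the float's bits as an unsigned integer: 1.0f is 0x3f800000.
  std::uint32_t bits = vtkmstd::bit_cast<std::uint32_t>(f);
  std::printf("0x%08x\n", bits);
  // Round-trip back to float; the value is unchanged.
  float g = vtkmstd::bit_cast<float>(bits);
  std::printf("%f\n", g);
  return 0;
}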