diff --git a/backends/aoti/slim/c10/core/Device.h b/backends/aoti/slim/c10/core/Device.h index 02e88a30b1e..cc401d2b461 100644 --- a/backends/aoti/slim/c10/core/Device.h +++ b/backends/aoti/slim/c10/core/Device.h @@ -1,8 +1,8 @@ #pragma once #include -#include #include +#include #include #include @@ -70,12 +70,7 @@ inline DeviceType parse_type(const std::string& device_string) { device_names.push_back(it.first); } } - STANDALONE_CHECK( - false, - "Expected one of ", - Join(", ", device_names), - " device type at start of device string: ", - device_string); + ET_CHECK_MSG(false, "Expected a valid device type at start of device string"); } } // namespace detail @@ -111,7 +106,7 @@ struct Device final { /// where `cpu` or `cuda` specifies the device type, and /// `:` optionally specifies a device index. /* implicit */ Device(const std::string& device_string) : Device(Type::CPU) { - STANDALONE_CHECK(!device_string.empty(), "Device string must not be empty"); + ET_CHECK_MSG(!device_string.empty(), "Device string must not be empty"); std::string device_name, device_index_str; detail::DeviceStringParsingState pstate = @@ -170,21 +165,14 @@ struct Device final { (pstate == detail::DeviceStringParsingState::kINDEX_START && device_index_str.empty()); - STANDALONE_CHECK( - !has_error, "Invalid device string: '", device_string, "'"); + ET_CHECK_MSG(!has_error, "Invalid device string"); try { if (!device_index_str.empty()) { index_ = static_cast(std::stoi(device_index_str)); } } catch (const std::exception&) { - STANDALONE_CHECK( - false, - "Could not parse device index '", - device_index_str, - "' in device string '", - device_string, - "'"); + ET_CHECK_MSG(false, "Could not parse device index in device string"); } type_ = detail::parse_type(device_name); validate(); @@ -326,13 +314,13 @@ struct Device final { // performance in micro-benchmarks. // This is safe to do, because backends that use the DeviceIndex // have a later check when we actually try to switch to that device. 
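// ---------------------------------------------------------------------------
// Illustrative sketch, not part of this patch: the ExecuTorch macros used
// throughout this change take printf-style format strings, whereas the
// STANDALONE_* macros they replace concatenated stream-style arguments.
// A hypothetical caller migrating a device-index check would go from
//
//   STANDALONE_CHECK(index >= -1, "Device index must be -1 or non-negative, got ", index);
//
// to something like:
//
//   #include <executorch/runtime/platform/assert.h>
//
//   void validate_index(int8_t index, bool is_cpu) {
//     ET_DCHECK_MSG(
//         index >= -1,
//         "Device index must be -1 or non-negative, got %d",
//         static_cast<int>(index));
//     ET_DCHECK_MSG(
//         !is_cpu || index <= 0,
//         "CPU device index must be -1 or zero, got %d",
//         static_cast<int>(index));
//   }
//
// ET_CHECK_MSG has the same call shape; ET_DCHECK_MSG, like the old
// STANDALONE_INTERNAL_ASSERT_DEBUG_ONLY, is intended to compile out in
// release (NDEBUG) builds, an assumption worth confirming against
// runtime/platform/assert.h.
// ---------------------------------------------------------------------------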
- STANDALONE_INTERNAL_ASSERT_DEBUG_ONLY( + ET_DCHECK_MSG( index_ >= -1, - "Device index must be -1 or non-negative, got ", + "Device index must be -1 or non-negative, got %d", static_cast(index_)); - STANDALONE_INTERNAL_ASSERT_DEBUG_ONLY( + ET_DCHECK_MSG( !is_cpu() || index_ <= 0, - "CPU device index must be -1 or zero, got ", + "CPU device index must be -1 or zero, got %d", static_cast(index_)); } }; diff --git a/backends/aoti/slim/c10/core/DeviceType.h b/backends/aoti/slim/c10/core/DeviceType.h index eb024a3595d..c7eb1044a5b 100644 --- a/backends/aoti/slim/c10/core/DeviceType.h +++ b/backends/aoti/slim/c10/core/DeviceType.h @@ -13,7 +13,7 @@ #include #include -#include +#include namespace executorch::backends::aoti::slim::c10 { enum class DeviceType : int8_t { @@ -94,7 +94,7 @@ inline std::string DeviceTypeName(DeviceType d, bool lower_case = false) { int idx = static_cast(d); if (idx < 0 || idx >= COMPILE_TIME_MAX_DEVICE_TYPES) { - STANDALONE_CHECK(false, "Unknown device: ", static_cast(d)); + ET_CHECK_MSG(false, "Unknown device"); } if (d == DeviceType::PrivateUse1) { return get_privateuse1_backend(lower_case); diff --git a/backends/aoti/slim/c10/core/Layout.h b/backends/aoti/slim/c10/core/Layout.h index 4d7b5499088..b4cf2264197 100644 --- a/backends/aoti/slim/c10/core/Layout.h +++ b/backends/aoti/slim/c10/core/Layout.h @@ -1,6 +1,6 @@ #pragma once -#include +#include #include #include @@ -46,7 +46,7 @@ inline std::ostream& operator<<(std::ostream& stream, c10::Layout layout) { case c10::kJagged: return stream << "Jagged"; default: - STANDALONE_CHECK(false, "Unknown layout"); + ET_CHECK_MSG(false, "Unknown layout"); } } diff --git a/backends/aoti/slim/c10/core/MemoryFormat.h b/backends/aoti/slim/c10/core/MemoryFormat.h index 68f1a6d7357..956e3c81703 100644 --- a/backends/aoti/slim/c10/core/MemoryFormat.h +++ b/backends/aoti/slim/c10/core/MemoryFormat.h @@ -1,7 +1,7 @@ #pragma once #include -#include +#include #include #include @@ -57,7 +57,7 @@ inline std::ostream& operator<<( case MemoryFormat::ChannelsLast3d: return stream << "ChannelsLast3d"; default: - STANDALONE_CHECK(false, "Unknown memory format ", memory_format); + ET_CHECK_MSG(false, "Unknown memory format"); } } @@ -79,8 +79,7 @@ inline std::vector get_channels_last_strides_2d(ArrayRef sizes) { strides[1] = strides[2] * sizes[2]; return strides; default: - STANDALONE_INTERNAL_ASSERT( - false, "ChannelsLast2d doesn't support size ", sizes.size()); + ET_DCHECK_MSG(false, "ChannelsLast2d doesn't support this size"); } } @@ -106,8 +105,7 @@ std::vector get_channels_last_strides_3d(ArrayRef sizes) { strides[1] = strides[2] * sizes[2]; return strides; default: - STANDALONE_INTERNAL_ASSERT( - false, "ChannelsLast3d doesn't support size ", sizes.size()); + ET_DCHECK_MSG(false, "ChannelsLast3d doesn't support this size"); } } diff --git a/backends/aoti/slim/c10/core/Scalar.h b/backends/aoti/slim/c10/core/Scalar.h index b46add34946..8dee107b7c3 100644 --- a/backends/aoti/slim/c10/core/Scalar.h +++ b/backends/aoti/slim/c10/core/Scalar.h @@ -1,17 +1,16 @@ #pragma once #include -#include #include #include #include #include -#include #include #include #include #include +#include // Copy-pasted from c10/core/Scalar.h, but dropping SymScalar support @@ -102,7 +101,7 @@ class Scalar { } else if (Tag::HAS_u == tag) { \ return checked_convert(v.u, #type); \ } \ - STANDALONE_CHECK(false) \ + ET_CHECK_MSG(false, "Unknown Scalar tag"); \ } // TODO: Support ComplexHalf accessor @@ -158,9 +157,9 @@ class Scalar { } Scalar operator-() const { - 
STANDALONE_CHECK( + ET_CHECK_MSG( !isBoolean(), - "torch boolean negative, the `-` operator, is not supported."); + "torch boolean negative, the `-` operator, is not supported"); if (isFloatingPoint()) { return Scalar(-v.d); } else if (isComplex()) { @@ -168,8 +167,7 @@ class Scalar { } else if (isIntegral(false)) { return Scalar(-v.i); } - STANDALONE_INTERNAL_ASSERT( - false, "unknown ivalue tag ", static_cast(tag)); + ET_CHECK_MSG(false, "unknown ivalue tag"); } Scalar conj() const { @@ -188,8 +186,7 @@ class Scalar { } else if (isIntegral(false)) { return std::log(v.i); } - STANDALONE_INTERNAL_ASSERT( - false, "unknown ivalue tag ", static_cast(tag)); + ET_CHECK_MSG(false, "unknown ivalue tag"); } template < @@ -219,7 +216,7 @@ class Scalar { // boolean scalar does not equal to a non boolean value return false; } else { - STANDALONE_INTERNAL_ASSERT(false); + ET_CHECK_MSG(false, "unexpected tag in equal"); } } @@ -249,7 +246,7 @@ class Scalar { // boolean scalar does not equal to a non boolean value return false; } else { - STANDALONE_INTERNAL_ASSERT(false); + ET_CHECK_MSG(false, "unexpected tag in equal"); } } @@ -276,7 +273,7 @@ class Scalar { } else if (isBoolean()) { return executorch::backends::aoti::slim::c10::ScalarType::Bool; } else { - throw std::runtime_error("Unknown scalar type."); + ET_CHECK_MSG(false, "Unknown scalar type."); } } diff --git a/backends/aoti/slim/c10/core/ScalarType.h b/backends/aoti/slim/c10/core/ScalarType.h index 6481b3d2c4b..a0549afa4ab 100644 --- a/backends/aoti/slim/c10/core/ScalarType.h +++ b/backends/aoti/slim/c10/core/ScalarType.h @@ -2,7 +2,6 @@ #include #include -#include #include #include #include @@ -17,6 +16,7 @@ #include #include #include +#include #include #include @@ -388,7 +388,7 @@ inline size_t elementSize(ScalarType t) { switch (t) { AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS(CASE_ELEMENTSIZE_CASE) default: - STANDALONE_CHECK(false, "Unknown ScalarType"); + ET_CHECK_MSG(false, "Unknown ScalarType"); } #undef CASE_ELEMENTSIZE_CASE } @@ -492,13 +492,13 @@ inline bool isSignedType(ScalarType t) { case ScalarType::QInt32: case ScalarType::QUInt4x2: case ScalarType::QUInt2x4: - STANDALONE_CHECK(false, "isSignedType not supported for quantized types"); + ET_CHECK_MSG(false, "isSignedType not supported for quantized types"); case ScalarType::Bits1x8: case ScalarType::Bits2x4: case ScalarType::Bits4x2: case ScalarType::Bits8: case ScalarType::Bits16: - STANDALONE_CHECK(false, "Bits types are undefined"); + ET_CHECK_MSG(false, "Bits types are undefined"); CASE_ISSIGNED(UInt16); CASE_ISSIGNED(UInt32); CASE_ISSIGNED(UInt64); @@ -543,7 +543,7 @@ inline bool isSignedType(ScalarType t) { // Do not add default here, but rather define behavior of every new entry // here. `-Wswitch-enum` would raise a warning in those cases. 
} - STANDALONE_CHECK(false, "Unknown ScalarType ", t); + ET_CHECK_MSG(false, "Unknown ScalarType"); #undef CASE_ISSIGNED } @@ -583,7 +583,7 @@ inline ScalarType toComplexType(ScalarType t) { case ScalarType::ComplexDouble: return ScalarType::ComplexDouble; default: - STANDALONE_CHECK(false, "Unknown Complex ScalarType for ", t); + ET_CHECK_MSG(false, "Unknown Complex ScalarType"); } } @@ -678,13 +678,8 @@ inline ScalarType promoteTypes(ScalarType a, ScalarType b) { // Handle identically equal types if (isQIntType(a) || isQIntType(b)) { - STANDALONE_CHECK( - false, - "promoteTypes with quantized numbers is not handled yet; figure out " - "what the correct rules should be, offending types: ", - toString(a), - " ", - toString(b)); + ET_CHECK_MSG( + false, "promoteTypes with quantized numbers is not handled yet"); } if (isBitsType(a) || isBitsType(b)) { @@ -692,12 +687,7 @@ inline ScalarType promoteTypes(ScalarType a, ScalarType b) { } if (isFloat8Type(a) || isFloat8Type(b)) { - STANDALONE_CHECK( - false, - "Promotion for Float8 Types is not supported, attempted to promote ", - toString(a), - " and ", - toString(b)); + ET_CHECK_MSG(false, "Promotion for Float8 Types is not supported"); } if (isBarebonesUnsignedType(a) || isBarebonesUnsignedType(b)) { @@ -717,18 +707,13 @@ inline ScalarType promoteTypes(ScalarType a, ScalarType b) { if (isFloatingType(b)) { return b; } - STANDALONE_CHECK( - false, - "Promotion for uint16, uint32, uint64 types is not supported, " - "attempted to promote ", - toString(a), - " and ", - toString(b)); + ET_CHECK_MSG( + false, "Promotion for uint16, uint32, uint64 types is not supported"); } auto ix_a = dtype2index[static_cast(a)]; - STANDALONE_INTERNAL_ASSERT(ix_a != -1); + ET_DCHECK_MSG(ix_a != -1, "Invalid ScalarType a"); auto ix_b = dtype2index[static_cast(b)]; - STANDALONE_INTERNAL_ASSERT(ix_b != -1); + ET_DCHECK_MSG(ix_b != -1, "Invalid ScalarType b"); // This table axes must be consistent with index2dtype // clang-format off diff --git a/backends/aoti/slim/c10/core/SizesAndStrides.h b/backends/aoti/slim/c10/core/SizesAndStrides.h index 0b9edaccde7..28d24555d1d 100644 --- a/backends/aoti/slim/c10/core/SizesAndStrides.h +++ b/backends/aoti/slim/c10/core/SizesAndStrides.h @@ -7,6 +7,7 @@ #include #include +#include #define STANDALONE_SIZES_AND_STRIDES_MAX_INLINE_SIZE 5 @@ -171,7 +172,11 @@ class SizesAndStrides { } void set_strides(IntArrayRef strides) { - STANDALONE_INTERNAL_ASSERT(strides.size() == size()); + ET_DCHECK_MSG( + strides.size() == size(), + "strides size (%zu) must match size (%zu)", + strides.size(), + size()); std::copy(strides.begin(), strides.end(), strides_begin()); } @@ -284,7 +289,7 @@ class SizesAndStrides { private: void resizeSlowPath(size_t newSize, size_t oldSize) { if (newSize <= STANDALONE_SIZES_AND_STRIDES_MAX_INLINE_SIZE) { - STANDALONE_INTERNAL_ASSERT_DEBUG_ONLY( + ET_DCHECK_MSG( !isInline(), "resizeSlowPath called when fast path should have been hit!"); int64_t* tempStorage = outOfLineStorage_; @@ -309,7 +314,7 @@ class SizesAndStrides { int64_t* tempStorage = // NOLINTNEXTLINE(cppcoreguidelines-no-malloc) static_cast(malloc(storageBytes(newSize))); - STANDALONE_CHECK( + ET_CHECK_MSG( tempStorage, "Could not allocate memory to change Tensor SizesAndStrides!"); const auto bytesToCopy = oldSize * sizeof(inlineStorage_[0]); @@ -361,7 +366,7 @@ class SizesAndStrides { } void copyDataInline(const SizesAndStrides& rhs) { - STANDALONE_INTERNAL_ASSERT_DEBUG_ONLY(rhs.isInline()); + ET_DCHECK_MSG(rhs.isInline(), "rhs must be inline"); 
memcpy(inlineStorage_, rhs.inlineStorage_, sizeof(inlineStorage_)); } @@ -372,17 +377,17 @@ class SizesAndStrides { void allocateOutOfLineStorage(size_t size) { // NOLINTNEXTLINE(cppcoreguidelines-no-malloc) outOfLineStorage_ = static_cast(malloc(storageBytes(size))); - STANDALONE_CHECK( + ET_CHECK_MSG( outOfLineStorage_, "Could not allocate memory for Tensor SizesAndStrides!"); } void resizeOutOfLineStorage(size_t newSize) { - STANDALONE_INTERNAL_ASSERT_DEBUG_ONLY(!isInline()); + ET_DCHECK_MSG(!isInline(), "must not be inline"); outOfLineStorage_ = static_cast( // NOLINTNEXTLINE(cppcoreguidelines-no-malloc) realloc(outOfLineStorage_, storageBytes(newSize))); - STANDALONE_CHECK( + ET_CHECK_MSG( outOfLineStorage_, "Could not allocate memory for Tensor SizesAndStrides!"); } diff --git a/backends/aoti/slim/c10/core/WrapDimMinimal.h b/backends/aoti/slim/c10/core/WrapDimMinimal.h index 68c80a4abc3..ce52ebeaab7 100644 --- a/backends/aoti/slim/c10/core/WrapDimMinimal.h +++ b/backends/aoti/slim/c10/core/WrapDimMinimal.h @@ -4,6 +4,7 @@ #include #include +#include // Different from the original implementation in c10, we don't need // to support SymInt here. @@ -39,15 +40,16 @@ namespace detail { // you'll get linker errors otherwise template T maybe_wrap_dim_slow(T dim, T dim_post_expr, bool wrap_scalar) { - STANDALONE_CHECK( - dim_post_expr >= 0, "Rank cannot be negative but got ", dim_post_expr); + ET_CHECK_MSG( + dim_post_expr >= 0, + "Rank cannot be negative but got %ld", + static_cast(dim_post_expr)); if (dim_post_expr == 0) { - STANDALONE_CHECK( + ET_CHECK_MSG( wrap_scalar, - "Dimension specified as ", - dim, - " but tensor has no dimensions"); + "Dimension specified as %ld but tensor has no dimensions", + static_cast(dim)); return executorch::backends::aoti::slim::c10::maybe_wrap_dim( std::move(dim), /*dim_post_expr=*/1, @@ -56,17 +58,14 @@ T maybe_wrap_dim_slow(T dim, T dim_post_expr, bool wrap_scalar) { T min = dim_post_expr * -1; T max = dim_post_expr - 1; - STANDALONE_CHECK( + ET_CHECK_MSG( min <= dim && dim <= max, - "Dimension out of range (expected to be in range of [", - min, - ", ", - max, - "], but got ", - dim, - ")"); + "Dimension out of range (expected to be in range of [%ld, %ld], but got %ld)", + static_cast(min), + static_cast(max), + static_cast(dim)); - STANDALONE_INTERNAL_ASSERT( + ET_DCHECK_MSG( false, "should never reach here as dim should be out-of-bounds"); } } // namespace detail diff --git a/backends/aoti/slim/c10/cuda/Exception.h b/backends/aoti/slim/c10/cuda/Exception.h index bd972c1652d..3f5774c2f71 100644 --- a/backends/aoti/slim/c10/cuda/Exception.h +++ b/backends/aoti/slim/c10/cuda/Exception.h @@ -9,12 +9,14 @@ #include #include -#include +#include +#include -#define STANDALONE_CUDA_CHECK(EXPR) \ - do { \ - const cudaError_t __err = EXPR; \ - STANDALONE_CHECK(__err == cudaSuccess, cudaGetErrorString(__err)); \ +#define STANDALONE_CUDA_CHECK(EXPR) \ + do { \ + const cudaError_t __err = EXPR; \ + ET_CHECK_MSG( \ + __err == cudaSuccess, "CUDA error: %s", cudaGetErrorString(__err)); \ } while (0) #define STANDALONE_CUDA_CHECK_WARN(EXPR) \ @@ -22,7 +24,7 @@ const cudaError_t __err = EXPR; \ if (STANDALONE_UNLIKELY(__err != cudaSuccess)) { \ [[maybe_unused]] auto error_unused = cudaGetLastError(); \ - STANDALONE_WARN("CUDA warning: ", cudaGetErrorString(__err)); \ + ET_LOG(Error, "CUDA warning: %s", cudaGetErrorString(__err)); \ } \ } while (0) diff --git a/backends/aoti/slim/c10/targets.bzl b/backends/aoti/slim/c10/targets.bzl index 2bef9f5cf96..d65e0f5aa8b 100644 
--- a/backends/aoti/slim/c10/targets.bzl +++ b/backends/aoti/slim/c10/targets.bzl @@ -15,7 +15,9 @@ def define_common_targets(): exclude = ["cuda/**/*.h"], ), visibility = ["@EXECUTORCH_CLIENTS"], - exported_deps = [], + exported_deps = [ + "//executorch/runtime/platform:platform", + ], ) # c10 CUDA-specific headers (requires CUDA SDK) diff --git a/backends/aoti/slim/c10/util/ArrayRef.h b/backends/aoti/slim/c10/util/ArrayRef.h index 9c7c6cd781d..62727455aaa 100644 --- a/backends/aoti/slim/c10/util/ArrayRef.h +++ b/backends/aoti/slim/c10/util/ArrayRef.h @@ -16,7 +16,7 @@ #pragma once #include -#include +#include #include #include @@ -59,7 +59,7 @@ class ArrayRef final { size_type Length; void debugCheckNullptrInvariant() { - STANDALONE_INTERNAL_ASSERT_DEBUG_ONLY( + ET_DCHECK_MSG( Data != nullptr || Length == 0, "created ArrayRef with nullptr and non-zero length! std::optional " "relies on this being illegal"); @@ -176,14 +176,14 @@ class ArrayRef final { /// front - Get the first element. constexpr const T& front() const { - STANDALONE_CHECK( + ET_CHECK_MSG( !empty(), "ArrayRef: attempted to access front() of empty list"); return Data[0]; } /// back - Get the last element. constexpr const T& back() const { - STANDALONE_CHECK( + ET_CHECK_MSG( !empty(), "ArrayRef: attempted to access back() of empty list"); return Data[Length - 1]; } @@ -195,21 +195,19 @@ class ArrayRef final { /// slice(n, m) - Take M elements of the array starting at element N constexpr ArrayRef slice(size_t N, size_t M) const { - STANDALONE_CHECK( + ET_CHECK_MSG( N + M <= size(), - "ArrayRef: invalid slice, N = ", + "ArrayRef: invalid slice, N = %zu; M = %zu; size = %zu", N, - "; M = ", M, - "; size = ", size()); return ArrayRef(data() + N, M); } /// slice(n) - Chop off the first N elements of the array. constexpr ArrayRef slice(size_t N) const { - STANDALONE_CHECK( - N <= size(), "ArrayRef: invalid slice, N = ", N, "; size = ", size()); + ET_CHECK_MSG( + N <= size(), "ArrayRef: invalid slice, N = %zu; size = %zu", N, size()); return slice(N, size() - N); } @@ -222,11 +220,10 @@ class ArrayRef final { /// Vector compatibility constexpr const T& at(size_t Index) const { - STANDALONE_CHECK( + ET_CHECK_MSG( Index < Length, - "ArrayRef: invalid index Index = ", + "ArrayRef: invalid index Index = %zu; Length = %zu", Index, - "; Length = ", Length); return Data[Index]; } diff --git a/backends/aoti/slim/c10/util/Exception.h b/backends/aoti/slim/c10/util/Exception.h deleted file mode 100644 index f83bf3f074a..00000000000 --- a/backends/aoti/slim/c10/util/Exception.h +++ /dev/null @@ -1,88 +0,0 @@ -#pragma once - -#include - -#include -#include - -// In the standalone version, STANDALONE_CHECK throws std::runtime_error -// instead of executorch::backends::aoti::slim::c10::Error. -namespace executorch::backends::aoti::slim::c10::detail { -template -std::string torchCheckMsgImpl(const char* /*msg*/, const Args&... args) { - // This is similar to the one in c10/util/Exception.h, but does - // not depend on the more complex c10::str() function. - // ostringstream may support less data types than c10::str(), - // but should be sufficient in the standalone world. - std::ostringstream oss; - ((oss << args), ...); - return oss.str(); -} -inline const char* torchCheckMsgImpl(const char* msg) { - return msg; -} -// If there is just 1 user-provided C-string argument, use it. 
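// ---------------------------------------------------------------------------
// Illustrative note, not part of this patch: the deleted STANDALONE_CHECK threw
// std::runtime_error, so failures were catchable; ET_CHECK_MSG, per the
// ExecuTorch runtime's assert.h, logs the message and aborts instead (an
// assumption worth double-checking there). A hypothetical caller that relied on
// catching the old exception:
//
//   try {
//     STANDALONE_CHECK(idx >= 0, "bad index ", idx);   // old behavior: throws
//   } catch (const std::runtime_error&) {
//     /* recover */
//   }
//
// would instead test the condition before asserting:
//
//   if (idx < 0) {
//     /* recover */
//   }
//   ET_CHECK_MSG(idx >= 0, "bad index %d", idx);       // new behavior: fatal
// ---------------------------------------------------------------------------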
-inline const char* torchCheckMsgImpl(const char* /*msg*/, const char* args) { - return args; -} -} // namespace executorch::backends::aoti::slim::c10::detail - -#define STANDALONE_CHECK_MSG(cond, type, ...) \ - (::executorch::backends::aoti::slim::c10::detail::torchCheckMsgImpl( \ - "Expected " #cond \ - " to be true, but got false. " \ - "(Could this error message be improved? If so, " \ - "please report an enhancement request to PyTorch.)", \ - ##__VA_ARGS__)) -#define STANDALONE_CHECK(cond, ...) \ - if (STANDALONE_UNLIKELY_OR_CONST(!(cond))) { \ - throw std::runtime_error(STANDALONE_CHECK_MSG( \ - cond, \ - "", \ - __func__, \ - ", ", \ - __FILE__, \ - ":", \ - __LINE__, \ - ", ", \ - ##__VA_ARGS__)); \ - } -#define STANDALONE_INTERNAL_ASSERT(cond, ...) \ - if (STANDALONE_UNLIKELY_OR_CONST(!(cond))) { \ - throw std::runtime_error(STANDALONE_CHECK_MSG( \ - cond, \ - "", \ - __func__, \ - ", ", \ - __FILE__, \ - ":", \ - __LINE__, \ - ", ", \ - #cond, \ - " INTERNAL ASSERT FAILED: ", \ - ##__VA_ARGS__)); \ - } - -#define WARNING_MESSAGE_STRING(...) \ - ::executorch::backends::aoti::slim::c10::detail::torchCheckMsgImpl( \ - __VA_ARGS__) - -#ifdef DISABLE_WARN -#define _STANDALONE_WARN_WITH(...) ((void)0); -#else -#define _STANDALONE_WARN_WITH(...) \ - std::cerr << __func__ << ", " << __FILE__ << ":" << __LINE__ << ", " \ - << WARNING_MESSAGE_STRING(__VA_ARGS__) << std::endl; -#endif - -#define STANDALONE_WARN(...) _STANDALONE_WARN_WITH(__VA_ARGS__); - -#ifdef NDEBUG -// Optimized version - generates no code. -#define STANDALONE_INTERNAL_ASSERT_DEBUG_ONLY(...) \ - while (false) \ - STANDALONE_EXPAND_MSVC_WORKAROUND(STANDALONE_INTERNAL_ASSERT(__VA_ARGS__)) -#else -#define STANDALONE_INTERNAL_ASSERT_DEBUG_ONLY(...) \ - STANDALONE_EXPAND_MSVC_WORKAROUND(STANDALONE_INTERNAL_ASSERT(__VA_ARGS__)) -#endif diff --git a/backends/aoti/slim/c10/util/TypeCast.h b/backends/aoti/slim/c10/util/TypeCast.h index e3d65a7ef16..ff85127a870 100644 --- a/backends/aoti/slim/c10/util/TypeCast.h +++ b/backends/aoti/slim/c10/util/TypeCast.h @@ -9,6 +9,7 @@ #include #include #include +#include #include @@ -231,9 +232,8 @@ STANDALONE_HOST_DEVICE To convert(From f) { // Define separately to avoid being inlined and prevent code-size bloat [[noreturn]] inline void report_overflow(const char* name) { - std::ostringstream oss; - oss << "value cannot be converted to type " << name << " without overflow"; - throw std::runtime_error(oss.str()); // rather than domain_error (issue 33562) + ET_CHECK_MSG( + false, "value cannot be converted to type %s without overflow", name); } template diff --git a/backends/aoti/slim/c10/util/accumulate.h b/backends/aoti/slim/c10/util/accumulate.h index 578c6246b29..c962de50f18 100644 --- a/backends/aoti/slim/c10/util/accumulate.h +++ b/backends/aoti/slim/c10/util/accumulate.h @@ -2,7 +2,7 @@ #pragma once -#include +#include #include #include @@ -75,7 +75,7 @@ template < typename C, std::enable_if_t, int> = 0> inline int64_t numelements_from_dim(const int k, const C& dims) { - STANDALONE_INTERNAL_ASSERT_DEBUG_ONLY(k >= 0); + ET_DCHECK_MSG(k >= 0, "numelements_from_dim: k must be non-negative"); if (k > static_cast(dims.size())) { return 1; @@ -92,8 +92,10 @@ template < typename C, std::enable_if_t, int> = 0> inline int64_t numelements_to_dim(const int k, const C& dims) { - STANDALONE_INTERNAL_ASSERT(0 <= k); - STANDALONE_INTERNAL_ASSERT((unsigned)k <= dims.size()); + ET_CHECK_MSG(0 <= k, "numelements_to_dim: k must be non-negative"); + ET_CHECK_MSG( + (unsigned)k <= dims.size(), + 
"numelements_to_dim: k must not exceed dims.size()"); auto cend = dims.cbegin(); std::advance(cend, k); @@ -106,14 +108,16 @@ template < typename C, std::enable_if_t, int> = 0> inline int64_t numelements_between_dim(int k, int l, const C& dims) { - STANDALONE_INTERNAL_ASSERT(0 <= k); - STANDALONE_INTERNAL_ASSERT(0 <= l); + ET_CHECK_MSG(0 <= k, "numelements_between_dim: k must be non-negative"); + ET_CHECK_MSG(0 <= l, "numelements_between_dim: l must be non-negative"); if (k > l) { std::swap(k, l); } - STANDALONE_INTERNAL_ASSERT((unsigned)l < dims.size()); + ET_CHECK_MSG( + (unsigned)l < dims.size(), + "numelements_between_dim: l must be less than dims.size()"); auto cbegin = dims.cbegin(); auto cend = dims.cbegin(); diff --git a/backends/aoti/slim/core/SlimTensor.h b/backends/aoti/slim/core/SlimTensor.h index 9021e2db922..3eea629d26b 100644 --- a/backends/aoti/slim/core/SlimTensor.h +++ b/backends/aoti/slim/core/SlimTensor.h @@ -153,13 +153,11 @@ class SlimTensor { executorch::backends::aoti::slim::c10::IntArrayRef strides, std::optional storage_offset = std::nullopt) { const int64_t new_dim = static_cast(sizes.size()); - STANDALONE_CHECK( + ET_CHECK_MSG( new_dim == static_cast(strides.size()), - "dimensionality of sizes (", - new_dim, - ") must match dimensionality of strides (", - strides.size(), - ")"); + "dimensionality of sizes (%ld) must match dimensionality of strides (%zu)", + static_cast(new_dim), + strides.size()); std::vector new_sizes = sizes.vec(); std::vector new_strides = strides.vec(); @@ -183,7 +181,7 @@ class SlimTensor { } } } - STANDALONE_CHECK(!overflowed, "Stride calculation overflowed"); + ET_CHECK_MSG(!overflowed, "Stride calculation overflowed"); sizes_and_strides_.set_sizes(new_sizes); sizes_and_strides_.set_strides(new_strides); @@ -235,13 +233,13 @@ class SlimTensor { } SlimTensor to(executorch::backends::aoti::slim::c10::ScalarType dtype) const { - STANDALONE_CHECK(false, "TBD: to(dtype)"); + ET_CHECK_MSG(false, "TBD: to(dtype)"); } SlimTensor& copy_(const SlimTensor& other) { - STANDALONE_CHECK( + ET_CHECK_MSG( this->numel() == other.numel(), "copy_: numel of tensors must match"); - STANDALONE_CHECK(this->dtype() == other.dtype(), "copy_: dtype must match"); + ET_CHECK_MSG(this->dtype() == other.dtype(), "copy_: dtype must match"); if (this->numel() == 0) { return *this; @@ -291,7 +289,7 @@ class SlimTensor { other.device() // src device ); #else - STANDALONE_CHECK(false, "copy_: no CUDA support"); + ET_CHECK_MSG(false, "copy_: no CUDA support"); #endif } // Increment the multi-dimensional counter @@ -315,13 +313,13 @@ class SlimTensor { } else if (this->device().is_cuda()) { #ifdef USE_CUDA cudaError_t err = cudaMemset(this->data_ptr(), 0, this->nbytes()); - STANDALONE_CHECK( + ET_CHECK_MSG( err == cudaSuccess, - "CUDA memset failed: ", + "CUDA memset failed: %s", cudaGetErrorString(err)); return *this; #else - STANDALONE_CHECK(false, "CUDA support not available"); + ET_CHECK_MSG(false, "CUDA support not available"); #endif } } @@ -342,9 +340,9 @@ class SlimTensor { host_data.data(), this->nbytes(), cudaMemcpyHostToDevice); - STANDALONE_CHECK( + ET_CHECK_MSG( err == cudaSuccess, - "CUDA memcpy failed: ", + "CUDA memcpy failed: %s", cudaGetErrorString(err)); } else { std::vector host_data(this->numel(), typed_value); @@ -353,9 +351,9 @@ class SlimTensor { host_data.data(), this->nbytes(), cudaMemcpyHostToDevice); - STANDALONE_CHECK( + ET_CHECK_MSG( err == cudaSuccess, - "CUDA memcpy failed: ", + "CUDA memcpy failed: %s", cudaGetErrorString(err)); } } else { @@ 
-366,7 +364,7 @@ class SlimTensor { this->copy_(cpu_tensor); } #else - STANDALONE_CHECK(false, "CUDA support not available"); + ET_CHECK_MSG(false, "CUDA support not available"); #endif } else if (this->device().is_cpu()) { if (this->is_contiguous()) { @@ -448,7 +446,7 @@ class SlimTensor { value.to>()); break; default: - STANDALONE_CHECK(false, "fill_: Unsupported dtype"); + ET_CHECK_MSG(false, "fill_: Unsupported dtype"); } return *this; } @@ -492,13 +490,11 @@ class SlimTensor { // Generic element access returning SlimTensor SlimTensor operator[]( executorch::backends::aoti::slim::c10::IntArrayRef indices) const { - STANDALONE_CHECK( + ET_CHECK_MSG( indices.size() <= this->dim(), - "Number of indices (", + "Number of indices (%zu) cannot exceed tensor dimensions (%zu)", indices.size(), - ") cannot exceed tensor dimensions (", - this->dim(), - ")"); + this->dim()); if (indices.size() == this->dim()) { // Full indexing - return 0-dimensional tensor @@ -583,16 +579,16 @@ class SlimTensor { return this ->item>(); default: - STANDALONE_CHECK(false, "item(): Unsupported dtype"); + ET_CHECK_MSG(false, "item(): Unsupported dtype"); } } // Templated version to access 0-dimensional tensor template T item() const { - STANDALONE_CHECK( + ET_CHECK_MSG( this->dim() == 0, "item() can only be called on 0-dimensional tensors"); - STANDALONE_CHECK( + ET_CHECK_MSG( this->numel() == 1, "item() requires tensor to have exactly 1 element"); // For 0-dimensional tensors, directly access the single element at diff --git a/backends/aoti/slim/core/SlimTensorResize-incl.h b/backends/aoti/slim/core/SlimTensorResize-incl.h index bfc7b149d5e..b9c3a798b74 100644 --- a/backends/aoti/slim/core/SlimTensorResize-incl.h +++ b/backends/aoti/slim/core/SlimTensorResize-incl.h @@ -9,12 +9,10 @@ namespace executorch::backends::aoti::slim { inline void SlimTensor::empty_tensor_restride( executorch::backends::aoti::slim::c10::MemoryFormat memory_format) { -#ifdef DEBUG - STANDALONE_INTERNAL_ASSERT( - compute_numel() == numel_, + ET_DCHECK_MSG( + compute_numel(sizes_and_strides_.sizes_arrayref()) == numel_, "If you are seeing this error, that means empty_tensor_restride was " "called before setting correct numel"); -#endif switch (memory_format) { case executorch::backends::aoti::slim::c10::MemoryFormat::Contiguous: { // dim_ is a virtual call, don't repeat it @@ -30,30 +28,29 @@ inline void SlimTensor::empty_tensor_restride( std::max(sizes_and_strides_.size_at_unchecked(i + 1), 1), std::addressof(sizes_and_strides_.stride_at_unchecked(i))); } - STANDALONE_CHECK(!overflowed, "Stride calculation overflowed"); + ET_CHECK_MSG(!overflowed, "Stride calculation overflowed"); } break; } case executorch::backends::aoti::slim::c10::MemoryFormat::ChannelsLast: { - STANDALONE_CHECK( + ET_CHECK_MSG( dim() == 4, "required rank 4 tensor to use channels_last format"); set_sizes_and_strides(sizes(), get_channels_last_strides_2d(sizes())); break; } case executorch::backends::aoti::slim::c10::MemoryFormat::ChannelsLast3d: { - STANDALONE_CHECK( + ET_CHECK_MSG( dim() == 5, "required rank 5 tensor to use channels_last_3d format"); set_sizes_and_strides(sizes(), get_channels_last_strides_3d(sizes())); break; } case executorch::backends::aoti::slim::c10::MemoryFormat::Preserve: - STANDALONE_CHECK(false, "unsupported memory format ", memory_format); - // Cleaning warning messages, no need to break as STANDALONE_CHECK(false) + ET_CHECK_MSG(false, "unsupported memory format: Preserve"); + // Cleaning warning messages, no need to break as 
ET_CHECK_MSG(false) // terminates flow. // break; case executorch::backends::aoti::slim::c10::MemoryFormat::NumOptions: - STANDALONE_INTERNAL_ASSERT( - false, "invalid memory format ", memory_format); + ET_DCHECK_MSG(false, "invalid memory format: NumOptions"); } // recompute contiguous flag, as currently NHWC/NCHW flags are not mutually // exclusive see #24090 @@ -64,7 +61,7 @@ inline void _resize_bytes( MaybeOwningStorage* storage, size_t new_size_bytes, size_t storage_offset_in_bytes) { - STANDALONE_CHECK( + ET_CHECK_MSG( storage->is_resizable(), "Trying to resize storage that is not resizable"); @@ -162,11 +159,10 @@ inline SlimTensor SlimTensor::resize_( executorch::backends::aoti::slim::c10::MemoryFormat memory_format = static_cast( optional_memory_format.value()); - STANDALONE_CHECK( + ET_CHECK_MSG( memory_format != executorch::backends::aoti::slim::c10::MemoryFormat::Preserve, - "Unsupported memory format", - memory_format); + "Unsupported memory format: Preserve"); this->empty_tensor_restride(memory_format); } return *this; diff --git a/backends/aoti/slim/core/SlimTensorView-incl.h b/backends/aoti/slim/core/SlimTensorView-incl.h index c247047900c..c352a531d2c 100644 --- a/backends/aoti/slim/core/SlimTensorView-incl.h +++ b/backends/aoti/slim/core/SlimTensorView-incl.h @@ -20,25 +20,22 @@ inline SlimTensor SlimTensor::as_strided_( executorch::backends::aoti::slim::c10::IntArrayRef sizes, executorch::backends::aoti::slim::c10::IntArrayRef strides, int64_t storage_offset) { - STANDALONE_CHECK( + ET_CHECK_MSG( sizes.size() == strides.size(), - "as_strided: number of sizes (", + "as_strided: number of sizes (%zu) must equal number of strides (%zu)", sizes.size(), - ") must equal number of strides (", - strides.size(), - ")"); + strides.size()); for (size_t i = 0; i < sizes.size(); ++i) { - STANDALONE_CHECK( + ET_CHECK_MSG( sizes[i] >= 0, - "as_strided: size at dimension ", + "as_strided: size at dimension %zu is negative: %ld", i, - " is negative: ", - sizes[i]); + static_cast(sizes[i])); } - STANDALONE_CHECK( + ET_CHECK_MSG( storage_offset >= 0, - "as_strided: storage_offset must be non-negative, got: ", - storage_offset); + "as_strided: storage_offset must be non-negative, got: %ld", + static_cast(storage_offset)); this->set_sizes_and_strides(sizes, strides, storage_offset); return *this; @@ -47,9 +44,9 @@ inline SlimTensor SlimTensor::as_strided_( inline SlimTensor SlimTensor::permute( executorch::backends::aoti::slim::c10::IntArrayRef dims) const { const size_t ndim = this->dim(); - STANDALONE_CHECK( + ET_CHECK_MSG( ndim == static_cast(dims.size()), - "permute: dims length must be equal to tensor.dim()") + "permute: dims length must be equal to tensor.dim()"); executorch::backends::aoti::slim::c10::ArrayRef old_sizes = this->sizes(); executorch::backends::aoti::slim::c10::ArrayRef old_strides = this->strides(); @@ -60,7 +57,7 @@ inline SlimTensor SlimTensor::permute( for (size_t i = 0; i < ndim; i++) { int64_t d = executorch::backends::aoti::slim::c10::maybe_wrap_dim(dims[i], ndim); - STANDALONE_CHECK(!seen_dims[d], "permute: duplicate dims are not allowed"); + ET_CHECK_MSG(!seen_dims[d], "permute: duplicate dims are not allowed"); seen_dims[d] = true; new_sizes[i] = old_sizes[d]; new_strides[i] = old_strides[d]; @@ -72,7 +69,7 @@ inline SlimTensor SlimTensor::permute( } inline SlimTensor SlimTensor::transpose() const { - STANDALONE_CHECK(dim() == 2, "transpose() can only be called on 2D tensors"); + ET_CHECK_MSG(dim() == 2, "transpose() can only be called on 2D tensors"); return 
permute({1, 0}); } @@ -124,24 +121,21 @@ inline SlimTensor SlimTensor::reshape( inline SlimTensor SlimTensor::narrow(int64_t dim, int64_t start, int64_t length) const { - STANDALONE_CHECK( + ET_CHECK_MSG( this->dim() > 0, "narrow() cannot be applied to a 0-dim tensor."); dim = executorch::backends::aoti::slim::c10::maybe_wrap_dim( dim, static_cast(this->dim())); start = executorch::backends::aoti::slim::c10::maybe_wrap_dim( start, static_cast(this->size(dim))); - STANDALONE_CHECK(length >= 0, "narrow(): length must be non-negative."); + ET_CHECK_MSG(length >= 0, "narrow(): length must be non-negative."); int64_t end = start + length; - STANDALONE_CHECK( + ET_CHECK_MSG( end <= this->size(dim), - "Invalid range to narrow. range(", - start, - ", ", - start + length, - ") must be a subset of range(0, ", - this->size(dim), - ")."); + "Invalid range to narrow. range(%ld, %ld) must be a subset of range(0, %ld).", + static_cast(start), + static_cast(start + length), + static_cast(this->size(dim))); SlimTensor result = *this; int64_t new_storage_offset = diff --git a/backends/aoti/slim/core/Storage.h b/backends/aoti/slim/core/Storage.h index 135b44bca23..ffcaa48b36b 100644 --- a/backends/aoti/slim/core/Storage.h +++ b/backends/aoti/slim/core/Storage.h @@ -1,7 +1,6 @@ #pragma once #include #include -#include #include #ifdef USE_CUDA @@ -12,9 +11,10 @@ #include #include #include -#include #include #include +#include +#include namespace executorch::backends::aoti::slim { using DeleterFn = void (*)(void*); @@ -98,12 +98,11 @@ struct DeviceTraits { direction = cudaMemcpyDeviceToHost; cuda_device = src_device; // Use source CUDA device } else { - STANDALONE_CHECK( + ET_CHECK_MSG( src_device.index() == dst_device.index(), - "CUDA memcpy failed across different device indices: ", - src_device.index(), - "!=", - dst_device.index()); + "CUDA memcpy failed across different device indices: %d != %d", + static_cast(src_device.index()), + static_cast(dst_device.index())); } // Set up CUDA context for the appropriate device executorch::backends::aoti::slim::cuda::CUDAGuard guard(cuda_device); @@ -116,11 +115,11 @@ struct DeviceTraits { static void* allocate( size_t nbytes, const executorch::backends::aoti::slim::c10::Device& device) { - STANDALONE_CHECK(false, "Build with USE_CUDA=1 to enable CUDA support"); + ET_CHECK_MSG(false, "Build with USE_CUDA=1 to enable CUDA support"); } static void free(void* ptr) { - STANDALONE_WARN("Build with USE_CUDA=1 to enable CUDA support"); + ET_LOG(Error, "Build with USE_CUDA=1 to enable CUDA support"); } static void memcpy( @@ -129,7 +128,7 @@ struct DeviceTraits { size_t nbytes, const executorch::backends::aoti::slim::c10::Device& dst_device, const executorch::backends::aoti::slim::c10::Device& src_device) { - STANDALONE_CHECK(false, "Build with USE_CUDA=1 to enable CUDA support"); + ET_CHECK_MSG(false, "Build with USE_CUDA=1 to enable CUDA support"); } }; #endif @@ -156,7 +155,7 @@ class MaybeOwningStorage { deleter_ = DeviceTraits< executorch::backends::aoti::slim::c10::DeviceType::CUDA>::free; } else { - STANDALONE_CHECK(false, "Unsupported device type"); + ET_CHECK_MSG(false, "Unsupported device type"); } } @@ -217,10 +216,10 @@ class MaybeOwningStorage { void* src_data_ptr, size_t nbytes, const executorch::backends::aoti::slim::c10::Device& src_device) { - STANDALONE_CHECK( - dst_data_ptr, "Storage clone failed: dst_data_ptr can not be nullptr") - STANDALONE_CHECK( - src_data_ptr, "Storage clone failed: src_data_ptr can not be nullptr") + ET_CHECK_MSG( + dst_data_ptr, 
"Storage clone failed: dst_data_ptr can not be nullptr"); + ET_CHECK_MSG( + src_data_ptr, "Storage clone failed: src_data_ptr can not be nullptr"); if (dst_data_ptr == src_data_ptr) { return; } @@ -238,8 +237,7 @@ class MaybeOwningStorage { MaybeOwningStorage clone( const executorch::backends::aoti::slim::c10::Device& device) const { - STANDALONE_CHECK( - data_, "Storage clone failed: source data can not be nullptr") + ET_CHECK_MSG(data_, "Storage clone failed: source data can not be nullptr"); // Create a new owning storage with the specified device and same capacity MaybeOwningStorage cloned_storage(device, capacity_); diff --git a/backends/aoti/slim/cuda/Guard.h b/backends/aoti/slim/cuda/Guard.h index 2fcafce92f9..6a42b3b6735 100644 --- a/backends/aoti/slim/cuda/Guard.h +++ b/backends/aoti/slim/cuda/Guard.h @@ -73,10 +73,7 @@ struct CUDAGuard { /// Sets the current CUDA device to the passed device. Errors if the passed /// device is not a CUDA device. explicit CUDAGuard(executorch::backends::aoti::slim::c10::Device device) { - STANDALONE_CHECK( - device.is_cuda(), - "Expected a CUDA device for CUDAGuard, but got ", - device); + ET_CHECK_MSG(device.is_cuda(), "Expected a CUDA device for CUDAGuard"); set_index(device.index()); } diff --git a/backends/aoti/slim/factory/FromBlob.h b/backends/aoti/slim/factory/FromBlob.h index c7a558f72ed..5f58987d750 100644 --- a/backends/aoti/slim/factory/FromBlob.h +++ b/backends/aoti/slim/factory/FromBlob.h @@ -12,7 +12,7 @@ inline SlimTensor from_blob( executorch::backends::aoti::slim::c10::ScalarType dtype, const executorch::backends::aoti::slim::c10::Device& device = CPU_DEVICE, int64_t storage_offset = 0) { - STANDALONE_CHECK(data != nullptr, "data pointer can not be nullptr"); + ET_CHECK_MSG(data != nullptr, "data pointer can not be nullptr"); Storage storage(new MaybeOwningStorage( device, diff --git a/backends/aoti/slim/factory/Pad.h b/backends/aoti/slim/factory/Pad.h index 44a83696a14..18e262c372b 100644 --- a/backends/aoti/slim/factory/Pad.h +++ b/backends/aoti/slim/factory/Pad.h @@ -8,14 +8,14 @@ inline SlimTensor constant_pad_nd( const SlimTensor& self, executorch::backends::aoti::slim::c10::IntArrayRef pad, const executorch::backends::aoti::slim::c10::Scalar& value) { - STANDALONE_CHECK(pad.size() % 2 == 0, "Length of pad must be even"); + ET_CHECK_MSG(pad.size() % 2 == 0, "Length of pad must be even"); executorch::backends::aoti::slim::c10::IntArrayRef input_sizes = self.sizes(); int64_t l_inp = self.dim(); int64_t l_pad = static_cast(pad.size()) / 2; int64_t l_diff = l_inp - l_pad; - STANDALONE_CHECK( + ET_CHECK_MSG( l_pad <= l_inp, "Length of pad should be no more than twice the input's dimension."); @@ -54,18 +54,10 @@ inline SlimTensor constant_pad_nd( executorch::backends::aoti::slim::c10::irange((size_t)l_pad)) { auto pad_idx = pad.size() - ((i + 1) * 2); auto new_dim = input_sizes[l_diff + i] + pad[pad_idx] + pad[pad_idx + 1]; - STANDALONE_CHECK( + ET_CHECK_MSG( new_dim > 0, - "The input size ", - input_sizes[l_diff + i], - ", plus negative padding ", - pad[pad_idx], - " and ", - pad[pad_idx + 1], - " resulted in a negative output size, " - "which is invalid. Check dimension ", - l_diff + i, - " of your input."); + "The input size plus negative padding resulted in a negative output size. 
" + "Check your input."); new_shape.emplace_back(new_dim); } @@ -98,11 +90,8 @@ inline SlimTensor pad( if (mode == "constant") { return constant_pad_nd(self, pad, value.value_or(0.0)); } - STANDALONE_CHECK( - false, - "Unsupported padding mode: ", - mode, - ". Only constant mode is available."); + ET_CHECK_MSG( + false, "Unsupported padding mode. Only constant mode is available."); } } // namespace executorch::backends::aoti::slim diff --git a/backends/aoti/slim/util/SharedPtr.h b/backends/aoti/slim/util/SharedPtr.h index 33a4def5845..10d498f3395 100644 --- a/backends/aoti/slim/util/SharedPtr.h +++ b/backends/aoti/slim/util/SharedPtr.h @@ -5,7 +5,7 @@ #include #include -#include +#include namespace executorch::backends::aoti::slim { @@ -141,11 +141,11 @@ class NonAtomicSharedPtr { return cb_ ? cb_->ptr : nullptr; } T& operator*() const { - STANDALONE_CHECK(cb_, "Dereferencing null NonAtomicSharedPtr"); + ET_CHECK_MSG(cb_, "Dereferencing null NonAtomicSharedPtr"); return *cb_->ptr; } T* operator->() const { - STANDALONE_CHECK(cb_, "Accessing member of null NonAtomicSharedPtr"); + ET_CHECK_MSG(cb_, "Accessing member of null NonAtomicSharedPtr"); return cb_->ptr; } long use_count() const noexcept { diff --git a/backends/aoti/slim/util/SizeUtil.h b/backends/aoti/slim/util/SizeUtil.h index 4eab9fc2329..234fc2e9457 100644 --- a/backends/aoti/slim/util/SizeUtil.h +++ b/backends/aoti/slim/util/SizeUtil.h @@ -6,7 +6,6 @@ #include #include -#include #include #include #include @@ -34,7 +33,7 @@ inline int64_t safe_compute_numel( bool overflowed = executorch::backends::aoti::slim::c10::safe_multiplies_u64(sizes, &n); overflowed |= (n > storage_max()); - STANDALONE_CHECK(!overflowed, "numel: integer multiplication overflow"); + ET_CHECK_MSG(!overflowed, "numel: integer multiplication overflow"); return static_cast(n); } @@ -54,7 +53,7 @@ inline std::vector safe_compute_contiguous_strides( stride = new_stride; } } - STANDALONE_CHECK( + ET_CHECK_MSG( !overflowed, "contiguous_strides: stride multiplication overflow"); } return strides; @@ -86,8 +85,7 @@ inline size_t compute_storage_nbytes_contiguous( overflowed |= executorch::backends::aoti::slim::c10::mul_overflows( size, itemsize_bytes, &size); overflowed |= size > storage_max(); - STANDALONE_CHECK( - !overflowed, "Storage size calculation overflowed with sizes=", sizes); + ET_CHECK_MSG(!overflowed, "Storage size calculation overflowed"); return static_cast(size); #else const auto numel = multiply_integers(sizes); @@ -101,13 +99,11 @@ inline size_t compute_storage_nbytes( executorch::backends::aoti::slim::c10::IntArrayRef strides, size_t itemsize_bytes, size_t storage_offset) { - STANDALONE_CHECK( + ET_CHECK_MSG( sizes.size() == strides.size(), - "dimensionality of sizes (", + "dimensionality of sizes (%zu) must match dimensionality of strides (%zu)", sizes.size(), - ") must match dimensionality of strides (", - strides.size(), - ")"); + strides.size()); // Ignore overflow checks on mobile #ifndef STANDALONE_MOBILE @@ -130,12 +126,7 @@ inline size_t compute_storage_nbytes( overflowed |= executorch::backends::aoti::slim::c10::mul_overflows( size, itemsize_bytes, &size); overflowed |= size > storage_max(); - STANDALONE_CHECK( - !overflowed, - "Storage size calculation overflowed with sizes=", - sizes, - " and strides=", - strides); + ET_CHECK_MSG(!overflowed, "Storage size calculation overflowed"); return static_cast(size); #else // size of the underlying storage is 1 bigger than the offset @@ -186,29 +177,36 @@ inline std::vector infer_size( bool 
overflowed = false; for (size_t dim = 0; dim < ndim; dim++) { if (shape[dim] == -1) { - STANDALONE_CHECK( + ET_CHECK_MSG( !infer_dim.has_value(), "only one dimension can be inferred"); infer_dim = dim; result_shape.push_back(-1); // placeholder } else { - STANDALONE_CHECK(shape[dim] >= 0, "invalid shape dimension ", shape[dim]); + ET_CHECK_MSG( + shape[dim] >= 0, + "invalid shape dimension %ld", + static_cast(shape[dim])); overflowed |= executorch::backends::aoti::slim::c10::mul_overflows( new_size, shape[dim], &new_size); result_shape.push_back(shape[dim]); } } - STANDALONE_CHECK(!overflowed, "shape calculation overflowed"); + ET_CHECK_MSG(!overflowed, "shape calculation overflowed"); if (infer_dim.has_value()) { - STANDALONE_CHECK( + ET_CHECK_MSG( new_size != 0, "cannot reshape tensor of 0 elements into shape with -1"); - STANDALONE_CHECK( - numel % new_size == 0, "shape is invalid for input size ", numel); + ET_CHECK_MSG( + numel % new_size == 0, + "shape is invalid for input size %ld", + static_cast(numel)); result_shape[*infer_dim] = numel / new_size; } else { - STANDALONE_CHECK( - numel == new_size, "shape is invalid for input of size ", numel); + ET_CHECK_MSG( + numel == new_size, + "shape is invalid for input of size %ld", + static_cast(numel)); } return result_shape; } @@ -282,7 +280,7 @@ inline std::optional> compute_stride( } } } - STANDALONE_CHECK(!overflowed, "overflowed while computing strides"); + ET_CHECK_MSG(!overflowed, "overflowed while computing strides"); if (view_d != -1) { return std::nullopt; // not viewable
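// ---------------------------------------------------------------------------
// Illustrative sketch, not part of this patch, of the "-1" dimension inference
// that infer_size above implements, written as a hypothetical standalone helper
// using the same ET_CHECK_MSG macros; overflow checks are omitted for brevity.
#include <cstdint>
#include <optional>
#include <vector>

#include <executorch/runtime/platform/assert.h>

inline std::vector<int64_t> infer_size_sketch(
    const std::vector<int64_t>& shape,
    int64_t numel) {
  std::vector<int64_t> result(shape);
  std::optional<size_t> infer_dim;
  int64_t known = 1;
  for (size_t d = 0; d < shape.size(); ++d) {
    if (shape[d] == -1) {
      // At most one dimension may be left for inference.
      ET_CHECK_MSG(
          !infer_dim.has_value(), "only one dimension can be inferred");
      infer_dim = d;
    } else {
      ET_CHECK_MSG(
          shape[d] >= 0,
          "invalid shape dimension %ld",
          static_cast<long>(shape[d]));
      known *= shape[d];
    }
  }
  if (infer_dim.has_value()) {
    // Worked case: numel = 6, shape = {-1, 3} gives known = 3, so the
    // inferred dimension becomes 6 / 3 = 2 and the result is {2, 3}.
    ET_CHECK_MSG(
        known != 0, "cannot reshape tensor of 0 elements into shape with -1");
    ET_CHECK_MSG(
        numel % known == 0,
        "shape is invalid for input size %ld",
        static_cast<long>(numel));
    result[*infer_dim] = numel / known;
  } else {
    ET_CHECK_MSG(
        numel == known,
        "shape is invalid for input of size %ld",
        static_cast<long>(numel));
  }
  return result;
}
// ---------------------------------------------------------------------------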