ArmNN
 25.11
Loading...
Searching...
No Matches
TypesUtils.cpp
Go to the documentation of this file.
1//
2// Copyright © 2017, 2024 Arm Ltd. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
8
namespace
{
/// Workaround for std::isnan() not being implemented correctly for integral types in MSVC.
/// https://stackoverflow.com/a/56356405
/// Two mutually exclusive SFINAE overloads: the first is selected for integral T,
/// the second for everything else (floating-point types).
/// @{
template <typename T, typename std::enable_if<std::is_integral<T>::value, T>::type* = nullptr>
inline int IsNan(T x)
{
 // The spec defines integral types to be handled as if they were casted to doubles.
 // (An integral value can never actually be NaN, so this always returns false,
 // but it avoids MSVC's broken integral std::isnan overload.)
 return std::isnan(static_cast<double>(x));
}

template <typename T, typename std::enable_if<!std::is_integral<T>::value, T>::type * = nullptr>
inline int IsNan(T x)
{
 return std::isnan(x);
}
/// @}
} // anonymous namespace
28
29template<typename QuantizedType>
30QuantizedType armnn::Quantize(float value, float scale, int32_t offset)
31{
32 static_assert(IsQuantizedType<QuantizedType>(), "Not an integer type.");
33 constexpr QuantizedType max = std::numeric_limits<QuantizedType>::max();
34 constexpr QuantizedType min = std::numeric_limits<QuantizedType>::lowest();
35 if (std::isnan(value))
36 {
37 throw armnn::InvalidArgumentException("Quantize: Value is NaN");
38 }
39
40 float clampedValue = std::min(std::max((static_cast<float>(offset) + static_cast<float>(round(value/scale))),
41 static_cast<float>(min)), static_cast<float>(max));
42 auto quantizedBits = static_cast<QuantizedType>(clampedValue);
43
44 return quantizedBits;
45}
46
47template <typename QuantizedType>
48float armnn::Dequantize(QuantizedType value, float scale, int32_t offset)
49{
50 static_assert(IsQuantizedType<QuantizedType>(), "Not an integer type.");
51 if (std::isnan(value))
52 {
53 throw armnn::InvalidArgumentException("Dequantize: Value is NaN");
54 }
55 return (armnn::numeric_cast<float>(value - offset)) * scale;
56}
57
/// Explicit instantiation of Quantize for int8_t
template
int8_t armnn::Quantize<int8_t>(float value, float scale, int32_t offset);

/// Explicit instantiation of Quantize for uint8_t
template
uint8_t armnn::Quantize<uint8_t>(float value, float scale, int32_t offset);

/// Explicit instantiation of Quantize for int16_t
template
int16_t armnn::Quantize<int16_t>(float value, float scale, int32_t offset);

/// Explicit instantiation of Quantize for int32_t
template
int32_t armnn::Quantize<int32_t>(float value, float scale, int32_t offset);

/// Explicit instantiation of Quantize for int64_t
template
int64_t armnn::Quantize<int64_t>(float value, float scale, int32_t offset);

/// Explicit instantiation of Dequantize for int8_t
template
float armnn::Dequantize<int8_t>(int8_t value, float scale, int32_t offset);

/// Explicit instantiation of Dequantize for uint8_t
template
float armnn::Dequantize<uint8_t>(uint8_t value, float scale, int32_t offset);

/// Explicit instantiation of Dequantize for int16_t
template
float armnn::Dequantize<int16_t>(int16_t value, float scale, int32_t offset);

/// Explicit instantiation of Dequantize for int32_t
template
float armnn::Dequantize<int32_t>(int32_t value, float scale, int32_t offset);

/// Explicit instantiation of Dequantize for int64_t
template
float armnn::Dequantize<int64_t>(int64_t value, float scale, int32_t offset);
constexpr bool IsQuantizedType()
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)