#include <armnn/TypesUtils.hpp>
#include <armnn/Exceptions.hpp>
#include <armnn/utility/NumericCast.hpp>

#include <algorithm>
#include <cmath>
#include <limits>
#include <type_traits>

namespace
{
// Helper for checking NaN on both integral and floating-point arguments:
// integral types are promoted to double before the std::isnan call.
template <typename T, typename std::enable_if<std::is_integral<T>::value, T>::type* = nullptr>
inline int IsNan(T x)
{
    return std::isnan(static_cast<double>(x));
}

template <typename T, typename std::enable_if<!std::is_integral<T>::value, T>::type* = nullptr>
inline int IsNan(T x)
{
    return std::isnan(x);
}
} // anonymous namespace
template <typename QuantizedType>
QuantizedType armnn::Quantize(float value, float scale, int32_t offset)
{
    static_assert(IsQuantizedType<QuantizedType>(), "Not an integer type.");
    constexpr QuantizedType max = std::numeric_limits<QuantizedType>::max();
    constexpr QuantizedType min = std::numeric_limits<QuantizedType>::lowest();

    if (std::isnan(value))
    {
        throw armnn::InvalidArgumentException("Quantize: Value is NaN");
    }

    // Affine quantization: scale the value, round, add the zero-point offset and
    // clamp to the representable range of the target quantized type.
    float clampedValue = std::min(std::max((static_cast<float>(offset) + static_cast<float>(round(value / scale))),
                                           static_cast<float>(min)),
                                  static_cast<float>(max));
    auto quantizedBits = static_cast<QuantizedType>(clampedValue);
    return quantizedBits;
}
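// Worked example of the affine mapping above (illustrative values only, not taken
// from the Arm NN sources): with scale = 0.5f and offset = 10,
// Quantize<uint8_t>(3.2f, 0.5f, 10) computes round(3.2 / 0.5) + 10 = 6 + 10 = 16,
// which already lies inside [0, 255] and is therefore returned unclamped.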
template <typename QuantizedType>
float armnn::Dequantize(QuantizedType value, float scale, int32_t offset)
{
    static_assert(IsQuantizedType<QuantizedType>(), "Not an integer type.");

    if (std::isnan(value))
    {
        throw armnn::InvalidArgumentException("Dequantize: Value is NaN");
    }
    // Inverse of the affine mapping: subtract the zero-point offset and rescale.
    return (armnn::numeric_cast<float>(value - offset)) * scale;
}
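// Worked example of the inverse mapping (same illustrative values as above):
// Dequantize<uint8_t>(16, 0.5f, 10) computes (16 - 10) * 0.5 = 3.0f, recovering
// the 3.2f quantized above up to the rounding error introduced by Quantize.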
// Explicit instantiations of Quantize for the supported quantized types.
template int8_t  armnn::Quantize<int8_t>(float value, float scale, int32_t offset);
template uint8_t armnn::Quantize<uint8_t>(float value, float scale, int32_t offset);
template int16_t armnn::Quantize<int16_t>(float value, float scale, int32_t offset);
template int32_t armnn::Quantize<int32_t>(float value, float scale, int32_t offset);

// Explicit instantiations of Dequantize for the supported quantized types.
template float armnn::Dequantize<int8_t>(int8_t value, float scale, int32_t offset);
template float armnn::Dequantize<uint8_t>(uint8_t value, float scale, int32_t offset);
template float armnn::Dequantize<int16_t>(int16_t value, float scale, int32_t offset);
template float armnn::Dequantize<int32_t>(int32_t value, float scale, int32_t offset);
template float armnn::Dequantize<int64_t>(int64_t value, float scale, int32_t offset);
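// Usage sketch (not part of the original file): a round trip through one of the
// instantiations listed above, using exactly representable illustrative values so
// the result is bit-exact. Callers are limited to the types instantiated here.
//
//     int16_t q = armnn::Quantize<int16_t>(1.25f, 0.25f, 0);   // round(1.25 / 0.25) + 0 = 5
//     float   r = armnn::Dequantize<int16_t>(q, 0.25f, 0);     // (5 - 0) * 0.25 = 1.25f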