38 std::string
read_file(
const std::string &filename,
bool binary)
43 #ifndef ARM_COMPUTE_EXCEPTIONS_DISABLED 47 fs.exceptions(std::ifstream::failbit | std::ifstream::badbit);
48 std::ios_base::openmode
mode = std::ios::in;
52 mode |= std::ios::binary;
55 fs.open(filename, mode);
60 out.reserve(fs.tellg());
62 fs.seekg(0, std::ios::beg);
64 out.assign(std::istreambuf_iterator<char>(fs), std::istreambuf_iterator<char>());
65 #ifndef ARM_COMPUTE_EXCEPTIONS_DISABLED 67 catch(
const std::ifstream::failure &e)
78 static std::map<Format, const std::string> formats_map =
99 return formats_map[format];
104 static std::map<Channel, const std::string> channels_map =
120 return channels_map[channel];
125 static std::map<DataLayout, const std::string> dl_map =
137 static std::map<DataType, const std::string> dt_map =
165 static std::map<ActivationLayerInfo::ActivationFunction, const std::string> act_map =
189 static std::map<MatrixPattern, const std::string> pattern_map =
197 return pattern_map[pattern];
202 static std::map<NonLinearFilterFunction, const std::string> func_map =
209 return func_map[
function];
214 static std::map<InterpolationPolicy, const std::string> interpolation_policy_map =
221 return interpolation_policy_map[policy];
226 static std::map<BorderMode, const std::string> border_mode_map =
233 return border_mode_map[border_mode];
238 static std::map<NormType, const std::string> norm_type_map =
245 return norm_type_map[
type];
250 static std::map<PoolingType, const std::string> pool_type_map =
257 return pool_type_map[
type];
262 static std::map<GEMMLowpOutputStageType, const std::string> output_stage_map =
270 return output_stage_map[output_stage];
275 std::stringstream
ss;
276 std::string converted_string;
283 ss << uint32_t(value.
get<uint8_t>());
284 converted_string = ss.str();
290 ss << int32_t(value.
get<int8_t>());
291 converted_string = ss.str();
295 ss << value.
get<uint16_t>();
296 converted_string = ss.str();
300 ss << value.
get<int16_t>();
301 converted_string = ss.str();
304 ss << value.
get<uint32_t>();
305 converted_string = ss.str();
308 ss << value.
get<int32_t>();
309 converted_string = ss.str();
315 static_assert(
sizeof(
half) == 2,
"Half must be 16 bit");
317 converted_string = ss.str();
323 return converted_string;
328 static const std::map<std::string, DataType> data_types =
335 #ifndef ARM_COMPUTE_EXCEPTIONS_DISABLED 341 #ifndef ARM_COMPUTE_EXCEPTIONS_DISABLED 343 catch(
const std::out_of_range &)
352 std::string res = val;
353 std::transform(res.begin(), res.end(), res.begin(),
::tolower);
360 const auto &strides = conv_info.
stride();
361 ARM_COMPUTE_ERROR_ON_MSG((strides.first < 1 || strides.second < 1),
"Stride values should be greater than or equal to 1.");
365 const unsigned int in_width = input_shape[width_idx];
366 const unsigned int in_height = input_shape[height_idx];
367 const unsigned int kernel_width = weights_shape[width_idx];
368 const unsigned int kernel_height = weights_shape[height_idx];
372 const unsigned int out_width = ((in_width - is_ceil) + strides.first - 1) / strides.first + is_ceil;
373 const unsigned int out_height = ((in_height - is_ceil) + strides.second - 1) / strides.second + is_ceil;
376 const int real_weight_width = (kernel_width - 1) * dilation.
x() + 1;
377 const int real_weight_height = (kernel_height - 1) * dilation.
y() + 1;
380 const int pad_width = std::max(0, static_cast<int>((out_width - 1) * strides.first + real_weight_width - in_width));
381 const int pad_height = std::max(0, static_cast<int>((out_height - 1) * strides.second + real_weight_height - in_height));
384 const unsigned int pad_left = pad_width / 2;
385 const unsigned int pad_top = pad_height / 2;
386 const unsigned int pad_right = pad_width - pad_left;
387 const unsigned int pad_bottom = pad_height - pad_top;
389 PadStrideInfo same_info(strides.first, strides.second, pad_left, pad_right, pad_top, pad_bottom, rounding_type);
392 const auto out_dims =
scaled_dimensions(in_width, in_height, kernel_width, kernel_height, same_info, dilation);
400 unsigned int kernel_width,
unsigned int kernel_height,
403 const unsigned int pad_left = pad_stride_info.
pad_left();
404 const unsigned int pad_top = pad_stride_info.
pad_top();
405 const unsigned int pad_right = pad_stride_info.
pad_right();
406 const unsigned int pad_bottom = pad_stride_info.
pad_bottom();
407 const unsigned int stride_x = pad_stride_info.
stride().first;
408 const unsigned int stride_y = pad_stride_info.
stride().second;
413 const int w = stride_x * (in_width - 1) + kernel_width - (pad_left + pad_right);
414 const int h = stride_y * (in_height - 1) + kernel_height - (pad_top + pad_bottom);
416 return std::make_pair<unsigned int, unsigned int>(
w, h);
420 int kernel_width,
int kernel_height,
424 const int dilation_x = dilation.
x();
425 const int dilation_y = dilation.
y();
426 const int pad_left = pad_stride_info.
pad_left();
427 const int pad_top = pad_stride_info.
pad_top();
428 const int pad_right = pad_stride_info.
pad_right();
429 const int pad_bottom = pad_stride_info.
pad_bottom();
430 const int stride_x = pad_stride_info.
stride().first;
431 const int stride_y = pad_stride_info.
stride().second;
434 switch(pad_stride_info.
round())
437 w =
static_cast<int>(std::floor((static_cast<float>(width + pad_left + pad_right - (dilation_x * (kernel_width - 1) + 1)) / stride_x) + 1));
438 h =
static_cast<int>(std::floor((static_cast<float>(height + pad_top + pad_bottom - (dilation_y * (kernel_height - 1) + 1)) / stride_y) + 1));
441 w =
static_cast<int>(std::ceil((static_cast<float>(width + pad_left + pad_right - (dilation_x * (kernel_width - 1) + 1)) / stride_x) + 1));
442 h =
static_cast<int>(std::ceil((static_cast<float>(height + pad_top + pad_bottom - (dilation_y * (kernel_height - 1) + 1)) / stride_y) + 1));
450 return std::make_pair(static_cast<unsigned int>(w), static_cast<unsigned int>(h));
457 const bool is_first_dim = (axis == 0);
459 return !is_first_dim || is_min_max || is_quantized_type;
486 const auto a = act_info.
a();
487 const auto b = act_info.
b();
490 const auto type_max_value = std::get<1>(
get_min_max(data_type)).get<int32_t>();
495 return std::make_pair(min_activation, max_activation);
498 std::unordered_map<const ITensorInfo *, PaddingSize>
get_padding_info(std::initializer_list<const ITensor *> tensors)
500 std::unordered_map<const ITensorInfo *, PaddingSize> res;
502 for(
const ITensor *tensor : tensors)
506 res.insert({ tensor->info(), tensor->info()->padding() });
513 std::unordered_map<const ITensorInfo *, PaddingSize>
get_padding_info(std::initializer_list<const ITensorInfo *> infos)
515 std::unordered_map<const ITensorInfo *, PaddingSize> res;
521 res.insert({
info,
info->padding() });
530 return std::find_if(padding_map.begin(), padding_map.end(), [](
const std::pair<const ITensorInfo *, PaddingSize> &padding_info)
532 return (padding_info.first->padding() != padding_info.second);
534 != padding_map.end();
537 #ifdef ARM_COMPUTE_ASSERTS_ENABLED 538 void print_consecutive_elements(std::ostream &s,
DataType dt,
const uint8_t *ptr,
unsigned int n,
int stream_width,
const std::string &element_delim)
544 print_consecutive_elements_impl<uint8_t>(s, ptr, n, stream_width, element_delim);
550 print_consecutive_elements_impl<int8_t>(s,
reinterpret_cast<const int8_t *
>(ptr), n, stream_width, element_delim);
554 print_consecutive_elements_impl<uint16_t>(s,
reinterpret_cast<const uint16_t *
>(ptr), n, stream_width, element_delim);
558 print_consecutive_elements_impl<int16_t>(s,
reinterpret_cast<const int16_t *
>(ptr), n, stream_width, element_delim);
561 print_consecutive_elements_impl<uint32_t>(s,
reinterpret_cast<const uint32_t *
>(ptr), n, stream_width, element_delim);
564 print_consecutive_elements_impl<int32_t>(s,
reinterpret_cast<const int32_t *
>(ptr), n, stream_width, element_delim);
567 print_consecutive_elements_impl<bfloat16>(s,
reinterpret_cast<const bfloat16 *
>(ptr), n, stream_width, element_delim);
570 print_consecutive_elements_impl<half>(s,
reinterpret_cast<const half *
>(ptr), n, stream_width, element_delim);
573 print_consecutive_elements_impl<float>(s,
reinterpret_cast<const float *
>(ptr), n, stream_width, element_delim);
580 int max_consecutive_elements_display_width(std::ostream &s,
DataType dt,
const uint8_t *ptr,
unsigned int n)
586 return max_consecutive_elements_display_width_impl<uint8_t>(s, ptr, n);
591 return max_consecutive_elements_display_width_impl<int8_t>(s,
reinterpret_cast<const int8_t *
>(ptr), n);
594 return max_consecutive_elements_display_width_impl<uint16_t>(s,
reinterpret_cast<const uint16_t *
>(ptr), n);
597 return max_consecutive_elements_display_width_impl<int16_t>(s,
reinterpret_cast<const int16_t *
>(ptr), n);
599 return max_consecutive_elements_display_width_impl<uint32_t>(s,
reinterpret_cast<const uint32_t *
>(ptr), n);
601 return max_consecutive_elements_display_width_impl<int32_t>(s,
reinterpret_cast<const int32_t *
>(ptr), n);
603 return max_consecutive_elements_display_width_impl<bfloat16>(s,
reinterpret_cast<const bfloat16 *
>(ptr), n);
605 return max_consecutive_elements_display_width_impl<half>(s,
reinterpret_cast<const half *
>(ptr), n);
607 return max_consecutive_elements_display_width_impl<float>(s,
reinterpret_cast<const float *
>(ptr), n);
BorderMode
Methods available to handle borders.
bool is_data_type_quantized(DataType dt)
Check if a given data type is of quantized type.
bool needs_serialized_reduction(ReductionOperation op, DataType dt, unsigned int axis)
Check if the given reduction operation should be handled in a serial way.
Class describing the value of a pixel for any image format.
A single plane of 32-bit macro pixel of U0, Y0, V0, Y1 byte.
InterpolationPolicy
Interpolation method.
Second channel (used by formats with unknown channel types).
Quantize using a fixed point multiplication.
DimensionRoundingType round() const
Get the rounding type.
quantized, symmetric fixed-point 16-bit number
ReductionOperation
Available reduction operations.
const std::string & string_from_matrix_pattern(MatrixPattern pattern)
Convert a matrix pattern into a string.
const std::string & string_from_channel(Channel channel)
Convert a channel identity into a string.
uint8_t quantize_qasymm8(float value, const INFO_TYPE &qinfo, RoundingPolicy rounding_policy=RoundingPolicy::TO_NEAREST_UP)
Quantize a value given an unsigned 8-bit asymmetric quantization scheme.
std::pair< unsigned int, unsigned int > deconvolution_output_dimensions(unsigned int in_width, unsigned int in_height, unsigned int kernel_width, unsigned int kernel_height, const PadStrideInfo &pad_stride_info)
Returns expected width and height of the deconvolution's output tensor.
Brain floating point representation class.
#define ARM_COMPUTE_ERROR(msg)
Print the given message then throw an std::runtime_error.
1 channel, 1 U8 per channel
float a() const
Get the alpha value.
void get(uint8_t &v) const
Interpret the pixel value as a U8.
half_float::half half
16-bit floating point type
1 channel, 1 F32 per channel
DimensionRoundingType
Dimension rounding type when down-scaling on CNNs.
Output values are defined by bilinear interpolation between the pixels.
#define ARM_COMPUTE_ERROR_VAR(msg,...)
Print the given message then throw an std::runtime_error.
std::stringstream ss(mlgo_str)
std::string string_from_pixel_value(const PixelValue &value, const DataType data_type)
Convert a PixelValue to a string, represented through the specific data type.
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
const DataLayout data_layout
const std::string & string_from_activation_func(ActivationLayerInfo::ActivationFunction act)
Translates a given activation function to a string.
Store the tensor's metadata.
A 2 plane YUV format of Luma (Y) and interleaved UV data at 4:2:0 sampling.
QuantizationInfo get_softmax_output_quantization_info(DataType input_type, bool is_log)
Returns output quantization information for softmax layer.
quantized, asymmetric fixed-point 16-bit number
1 channel, 1 U16 per channel
size_t x() const
Semantic accessor for width as x.
unsigned int pad_top() const
Get the top padding.
Output values are defined to match the source pixel whose center is nearest to the sample position.
const std::string & string_from_gemmlowp_output_stage(GEMMLowpOutputStageType output_stage)
Translates a given GEMMLowp output stage to a string.
std::string lower_string(const std::string &val)
Lower a given string.
Activation Layer Information class.
decltype(strategy::transforms) typedef type
Interface for Neon tensor.
A 2 plane YUV format of Luma (Y) and interleaved VU data at 4:2:0 sampling.
Copyright (c) 2017-2021 Arm Limited.
ActivationFunction
Available activation functions.
1 channel, 1 F16 per channel
std::pair< unsigned int, unsigned int > scaled_dimensions(int width, int height, int kernel_width, int kernel_height, const PadStrideInfo &pad_stride_info, const Size2D &dilation=Size2D(1U, 1U))
Returns expected width and height of output scaled tensor depending on dimensions rounding mode...
const std::string & string_from_norm_type(NormType type)
Translates a given normalization type to a string.
const std::string & string_from_border_mode(BorderMode border_mode)
Translates a given border mode policy to a string.
1 channel, 1 S32 per channel
16-bit brain floating-point number
3 channels, 1 U8 per channel
std::string tolower(std::string string)
Convert string to lower case.
Quantization information.
TensorShape input_shape
Validate test suite is to test ARM_COMPUTE_RETURN_ON_* macros we use to check the validity of given a...
const std::string & string_from_data_type(DataType dt)
Convert a data type identity into a string.
Exponential Linear Unit ( $f(x) = x$ for $x > 0$, $f(x) = a(e^{x} - 1)$ otherwise )
#define ARM_COMPUTE_UNUSED(...)
To avoid unused variables warnings.
std::string read_file(const std::string &filename, bool binary)
Load an entire file in memory.
Third channel (used by formats with unknown channel types).
1 channel, 1 U32 per channel
std::string float_to_string_with_full_precision(float val)
Create a string with the float in full precision.
Normalization applied within the same map in 1D region.
Channel
Available channels.
Format
Image colour formats.
std::pair< int32_t, int32_t > get_quantized_activation_min_max(ActivationLayerInfo act_info, DataType data_type, UniformQuantizationInfo oq_info)
Returns a pair of minimum and maximum values for a quantized activation.
int8_t quantize_qasymm8_signed(float value, const INFO_TYPE &qinfo, RoundingPolicy rounding_policy=RoundingPolicy::TO_NEAREST_UP)
Quantize a value given a signed 8-bit asymmetric quantization scheme.
quantized, asymmetric fixed-point 8-bit number unsigned
#define ARM_COMPUTE_ERROR_ON_MSG(cond, msg)
std::pair< unsigned int, unsigned int > stride() const
Get the stride.
const std::string & string_from_interpolation_policy(InterpolationPolicy policy)
Translates a given interpolation policy to a string.
bool is_data_type_quantized_asymmetric_signed(DataType dt)
Check if a given data type is of asymmetric quantized signed type.
A 3 plane of 8 bit 4:4:4 sampled Y, U, V planes.
Fourth channel (used by formats with unknown channel types).
unsigned int pad_right() const
Get the right padding.
const std::string & string_from_non_linear_filter_function(NonLinearFilterFunction function)
Translates a given non linear function to a string.
Quantize using a floating point multiplication.
Padding and stride information class.
void end(TokenStream &in, bool &valid)
GEMMLowpOutputStageType
GEMMLowp output stage type.
Quantize using an integer multiplication.
1 channel, 1 S16 per channel
Output values are determined by averaging the source pixels whose areas fall under the area of the destination pixel, projected onto the source image.
bool has_padding_changed(const std::unordered_map< const ITensorInfo *, PaddingSize > &padding_map)
Check if the previously stored padding info has changed after configuring a kernel.
quantized, symmetric fixed-point 8-bit number
Num samples, channels, height, width.
size_t y() const
Semantic accessor for height as y.
quantized, symmetric per channel fixed-point 8-bit number
A 3 plane of 8-bit 4:2:0 sampled Y, U, V planes.
Lower and Upper Bounded Rectifier ( $f(x) = \min(a, \max(b, x))$ )
4 channels, 1 U8 per channel
PoolingType
Available pooling types.
ScaleKernelInfo info(interpolation_policy, default_border_mode, PixelValue(), sampling_policy, false)
const std::string & string_from_data_layout(DataLayout dl)
Convert a data layout identity into a string.
Upper Bounded Rectifier ( $f(x) = \min(a, \max(0, x))$ )
Borders are left undefined.
Pixels outside the image are assumed to have the same value as the closest image pixel.
Class for specifying the size of an image or rectangle.
Num samples, height, width, channels.
std::unordered_map< const ITensorInfo *, PaddingSize > get_padding_info(std::initializer_list< const ITensorInfo *> infos)
Stores padding information before configuring a kernel.
2 channel, 1 U8 per channel
ActivationFunction activation() const
Get the type of activation function.
float b() const
Get the beta value.
quantized, asymmetric fixed-point 8-bit number signed
64-bit floating-point number
size_t get_data_layout_dimension_index(const DataLayout data_layout, const DataLayoutDimension data_layout_dimension)
Get the index of the given dimension.
DataType data_type_from_name(const std::string &name)
Convert a string to DataType.
unsigned int pad_bottom() const
Get the bottom padding.
A single plane of 32-bit macro pixel of Y0, U0, Y1, V0 bytes.
DataType
Available data types.
unsigned int pad_left() const
Get the left padding.
DataLayout
[DataLayout enum definition]
const std::string & string_from_pooling_type(PoolingType type)
Translates a given pooling type to a string.
NormType
The normalization type used for the normalization layer.
MatrixPattern
Available matrix patterns.
Normalization applied cross maps.
std::tuple< PixelValue, PixelValue > get_min_max(DataType dt)
Compute the minimum and maximum values a data type can take.
Any other matrix pattern.
NonLinearFilterFunction
Available non linear functions.
Normalization applied within the same map in 2D region.
PadStrideInfo calculate_same_pad(TensorShape input_shape, TensorShape weights_shape, PadStrideInfo conv_info, DataLayout data_layout=DataLayout::NCHW, const Size2D &dilation=Size2D(1u, 1u), const DimensionRoundingType &rounding_type=DimensionRoundingType::FLOOR)
Calculate padding requirements in case of SAME padding.
Non linear median filter.
const std::string & string_from_format(Format format)
Convert a tensor format into a string.