38 std::string
read_file(
const std::string &filename,
bool binary)
43 #ifndef ARM_COMPUTE_EXCEPTIONS_DISABLED 47 fs.exceptions(std::ifstream::failbit | std::ifstream::badbit);
48 std::ios_base::openmode
mode = std::ios::in;
52 mode |= std::ios::binary;
55 fs.open(filename,
mode);
60 out.reserve(fs.tellg());
62 fs.seekg(0, std::ios::beg);
64 out.assign(std::istreambuf_iterator<char>(fs), std::istreambuf_iterator<char>());
65 #ifndef ARM_COMPUTE_EXCEPTIONS_DISABLED 67 catch(
const std::ifstream::failure &e)
78 static std::map<Format, const std::string> formats_map =
99 return formats_map[format];
104 static std::map<Channel, const std::string> channels_map =
120 return channels_map[channel];
125 static std::map<DataLayout, const std::string> dl_map =
137 static std::map<DataType, const std::string> dt_map =
165 static std::map<ActivationLayerInfo::ActivationFunction, const std::string> act_map =
189 static std::map<InterpolationPolicy, const std::string> interpolation_policy_map =
196 return interpolation_policy_map[policy];
201 static std::map<BorderMode, const std::string> border_mode_map =
208 return border_mode_map[border_mode];
213 static std::map<NormType, const std::string> norm_type_map =
220 return norm_type_map[
type];
225 static std::map<PoolingType, const std::string> pool_type_map =
232 return pool_type_map[
type];
237 static std::map<GEMMLowpOutputStageType, const std::string> output_stage_map =
245 return output_stage_map[output_stage];
250 std::stringstream
ss;
251 std::string converted_string;
258 ss << uint32_t(value.
get<uint8_t>());
259 converted_string =
ss.str();
265 ss << int32_t(value.
get<int8_t>());
266 converted_string =
ss.str();
270 ss << value.
get<uint16_t>();
271 converted_string =
ss.str();
275 ss << value.
get<int16_t>();
276 converted_string =
ss.str();
279 ss << value.
get<uint32_t>();
280 converted_string =
ss.str();
283 ss << value.
get<int32_t>();
284 converted_string =
ss.str();
290 static_assert(
sizeof(
half) == 2,
"Half must be 16 bit");
292 converted_string =
ss.str();
298 return converted_string;
303 static const std::map<std::string, DataType> data_types =
311 #ifndef ARM_COMPUTE_EXCEPTIONS_DISABLED 317 #ifndef ARM_COMPUTE_EXCEPTIONS_DISABLED 319 catch(
const std::out_of_range &)
328 std::string res = val;
329 std::transform(res.begin(), res.end(), res.begin(),
::tolower);
336 const auto &strides =
conv_info.stride();
337 ARM_COMPUTE_ERROR_ON_MSG((strides.first < 1 || strides.second < 1),
"Stride values should be greater than or equal to 1.");
341 const unsigned int in_width =
input_shape[width_idx];
342 const unsigned int in_height =
input_shape[height_idx];
343 const unsigned int kernel_width = weights_shape[width_idx];
344 const unsigned int kernel_height = weights_shape[height_idx];
348 const unsigned int out_width = ((in_width - is_ceil) + strides.first - 1) / strides.first + is_ceil;
349 const unsigned int out_height = ((in_height - is_ceil) + strides.second - 1) / strides.second + is_ceil;
352 const int real_weight_width = (kernel_width - 1) * dilation.
x() + 1;
353 const int real_weight_height = (kernel_height - 1) * dilation.
y() + 1;
356 const int pad_width = std::max(0, static_cast<int>((out_width - 1) * strides.first + real_weight_width - in_width));
357 const int pad_height = std::max(0, static_cast<int>((out_height - 1) * strides.second + real_weight_height - in_height));
360 const unsigned int pad_left = pad_width / 2;
361 const unsigned int pad_top = pad_height / 2;
362 const unsigned int pad_right = pad_width - pad_left;
363 const unsigned int pad_bottom = pad_height - pad_top;
365 PadStrideInfo same_info(strides.first, strides.second, pad_left, pad_right, pad_top, pad_bottom, rounding_type);
368 const auto out_dims =
scaled_dimensions(in_width, in_height, kernel_width, kernel_height, same_info, dilation);
376 unsigned int kernel_width,
unsigned int kernel_height,
379 const unsigned int pad_left = pad_stride_info.
pad_left();
380 const unsigned int pad_top = pad_stride_info.
pad_top();
381 const unsigned int pad_right = pad_stride_info.
pad_right();
382 const unsigned int pad_bottom = pad_stride_info.
pad_bottom();
383 const unsigned int stride_x = pad_stride_info.
stride().first;
384 const unsigned int stride_y = pad_stride_info.
stride().second;
389 const int w = stride_x * (in_width - 1) + kernel_width - (pad_left + pad_right);
390 const int h = stride_y * (in_height - 1) + kernel_height - (pad_top + pad_bottom);
392 return std::make_pair<unsigned int, unsigned int>(
w, h);
396 int kernel_width,
int kernel_height,
400 const int dilation_x = dilation.
x();
401 const int dilation_y = dilation.
y();
402 const int pad_left = pad_stride_info.
pad_left();
403 const int pad_top = pad_stride_info.
pad_top();
404 const int pad_right = pad_stride_info.
pad_right();
405 const int pad_bottom = pad_stride_info.
pad_bottom();
406 const int stride_x = pad_stride_info.
stride().first;
407 const int stride_y = pad_stride_info.
stride().second;
410 switch(pad_stride_info.
round())
413 w = static_cast<int>(std::floor((static_cast<float>(width + pad_left + pad_right - (dilation_x * (kernel_width - 1) + 1)) / stride_x) + 1));
414 h = static_cast<int>(std::floor((static_cast<float>(height + pad_top + pad_bottom - (dilation_y * (kernel_height - 1) + 1)) / stride_y) + 1));
417 w = static_cast<int>(std::ceil((static_cast<float>(width + pad_left + pad_right - (dilation_x * (kernel_width - 1) + 1)) / stride_x) + 1));
418 h = static_cast<int>(std::ceil((static_cast<float>(height + pad_top + pad_bottom - (dilation_y * (kernel_height - 1) + 1)) / stride_y) + 1));
426 return std::make_pair(static_cast<unsigned int>(
w), static_cast<unsigned int>(h));
433 const bool is_first_dim = (axis == 0);
435 return !is_first_dim || is_min_max || is_quantized_type;
462 const auto a = act_info.
a();
463 const auto b = act_info.
b();
471 return std::make_pair(min_activation, max_activation);
474 std::unordered_map<const ITensorInfo *, PaddingSize>
get_padding_info(std::initializer_list<const ITensor *> tensors)
476 std::unordered_map<const ITensorInfo *, PaddingSize> res;
478 for(
const ITensor *tensor : tensors)
482 res.insert({ tensor->info(), tensor->info()->padding() });
489 std::unordered_map<const ITensorInfo *, PaddingSize>
get_padding_info(std::initializer_list<const ITensorInfo *> infos)
491 std::unordered_map<const ITensorInfo *, PaddingSize> res;
497 res.insert({
info,
info->padding() });
506 return std::find_if(padding_map.begin(), padding_map.end(), [](
const std::pair<const ITensorInfo *, PaddingSize> &padding_info)
508 return (padding_info.first->padding() != padding_info.second);
510 != padding_map.end();
513 #ifdef ARM_COMPUTE_ASSERTS_ENABLED 514 void print_consecutive_elements(std::ostream &s,
DataType dt,
const uint8_t *ptr,
unsigned int n,
int stream_width,
const std::string &element_delim)
520 print_consecutive_elements_impl<uint8_t>(s, ptr, n, stream_width, element_delim);
526 print_consecutive_elements_impl<int8_t>(s, reinterpret_cast<const int8_t *>(ptr), n, stream_width, element_delim);
530 print_consecutive_elements_impl<uint16_t>(s, reinterpret_cast<const uint16_t *>(ptr), n, stream_width, element_delim);
534 print_consecutive_elements_impl<int16_t>(s, reinterpret_cast<const int16_t *>(ptr), n, stream_width, element_delim);
537 print_consecutive_elements_impl<uint32_t>(s, reinterpret_cast<const uint32_t *>(ptr), n, stream_width, element_delim);
540 print_consecutive_elements_impl<int32_t>(s, reinterpret_cast<const int32_t *>(ptr), n, stream_width, element_delim);
543 print_consecutive_elements_impl<bfloat16>(s, reinterpret_cast<const bfloat16 *>(ptr), n, stream_width, element_delim);
546 print_consecutive_elements_impl<half>(s, reinterpret_cast<const half *>(ptr), n, stream_width, element_delim);
549 print_consecutive_elements_impl<float>(s, reinterpret_cast<const float *>(ptr), n, stream_width, element_delim);
556 int max_consecutive_elements_display_width(std::ostream &s,
DataType dt,
const uint8_t *ptr,
unsigned int n)
562 return max_consecutive_elements_display_width_impl<uint8_t>(s, ptr, n);
567 return max_consecutive_elements_display_width_impl<int8_t>(s, reinterpret_cast<const int8_t *>(ptr), n);
570 return max_consecutive_elements_display_width_impl<uint16_t>(s, reinterpret_cast<const uint16_t *>(ptr), n);
573 return max_consecutive_elements_display_width_impl<int16_t>(s, reinterpret_cast<const int16_t *>(ptr), n);
575 return max_consecutive_elements_display_width_impl<uint32_t>(s, reinterpret_cast<const uint32_t *>(ptr), n);
577 return max_consecutive_elements_display_width_impl<int32_t>(s, reinterpret_cast<const int32_t *>(ptr), n);
579 return max_consecutive_elements_display_width_impl<bfloat16>(s, reinterpret_cast<const bfloat16 *>(ptr), n);
581 return max_consecutive_elements_display_width_impl<half>(s, reinterpret_cast<const half *>(ptr), n);
583 return max_consecutive_elements_display_width_impl<float>(s, reinterpret_cast<const float *>(ptr), n);
BorderMode
Methods available to handle borders.
bool is_data_type_quantized(DataType dt)
Check if a given data type is of quantized type.
bool needs_serialized_reduction(ReductionOperation op, DataType dt, unsigned int axis)
Check if the given reduction operation should be handled in a serial way.
Class describing the value of a pixel for any image format.
A single plane of 32-bit macro pixel of U0, Y0, V0, Y1 byte.
InterpolationPolicy
Interpolation method.
Second channel (used by formats with unknown channel types).
Quantize using a fixed point multiplication.
DimensionRoundingType round() const
Get the rounding type.
quantized, symmetric fixed-point 16-bit number
std::unordered_map< const ITensorInfo *, PaddingSize > get_padding_info(std::initializer_list< const ITensorInfo * > infos)
Stores padding information before configuring a kernel.
ReductionOperation
Available reduction operations.
const std::string & string_from_channel(Channel channel)
Convert a channel identity into a string.
uint8_t quantize_qasymm8(float value, const INFO_TYPE &qinfo, RoundingPolicy rounding_policy=RoundingPolicy::TO_NEAREST_UP)
Quantize a value given an unsigned 8-bit asymmetric quantization scheme.
std::pair< unsigned int, unsigned int > deconvolution_output_dimensions(unsigned int in_width, unsigned int in_height, unsigned int kernel_width, unsigned int kernel_height, const PadStrideInfo &pad_stride_info)
Returns expected width and height of the deconvolution's output tensor.
#define ARM_COMPUTE_ERROR(msg)
Print the given message then throw an std::runtime_error.
1 channel, 1 U8 per channel
float a() const
Get the alpha value.
void get(uint8_t &v) const
Interpret the pixel value as a U8.
half_float::half half
16-bit floating point type
1 channel, 1 F32 per channel
DimensionRoundingType
Dimension rounding type when down-scaling on CNNs.
Output values are defined by bilinear interpolation between the pixels.
#define ARM_COMPUTE_ERROR_VAR(msg,...)
Print the given message then throw an std::runtime_error.
std::stringstream ss(mlgo_str)
std::string string_from_pixel_value(const PixelValue &value, const DataType data_type)
Convert a PixelValue to a string, represented through the specific data type.
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
const DataLayout data_layout
const std::string & string_from_activation_func(ActivationLayerInfo::ActivationFunction act)
Translates a given activation function to a string.
Store the tensor's metadata.
A 2 plane YUV format of Luma (Y) and interleaved UV data at 4:2:0 sampling.
QuantizationInfo get_softmax_output_quantization_info(DataType input_type, bool is_log)
Returns output quantization information for softmax layer.
quantized, asymmetric fixed-point 16-bit number
1 channel, 1 U16 per channel
size_t x() const
Semantic accessor for width as x.
unsigned int pad_top() const
Get the top padding.
Output values are defined to match the source pixel whose center is nearest to the sample position.
const std::string & string_from_gemmlowp_output_stage(GEMMLowpOutputStageType output_stage)
Translates a given GEMMLowp output stage to a string.
std::string lower_string(const std::string &val)
Lower a given string.
Activation Layer Information class.
decltype(strategy::transforms) typedef type
Interface for CPU tensor.
A 2 plane YUV format of Luma (Y) and interleaved VU data at 4:2:0 sampling.
Copyright (c) 2017-2021 Arm Limited.
ActivationFunction
Available activation functions.
1 channel, 1 F16 per channel
std::pair< unsigned int, unsigned int > scaled_dimensions(int width, int height, int kernel_width, int kernel_height, const PadStrideInfo &pad_stride_info, const Size2D &dilation=Size2D(1U, 1U))
Returns expected width and height of output scaled tensor depending on dimensions rounding mode.
const std::string & string_from_norm_type(NormType type)
Translates a given normalization type to a string.
const std::string & string_from_border_mode(BorderMode border_mode)
Translates a given border mode policy to a string.
1 channel, 1 S32 per channel
16-bit brain floating-point number
3 channels, 1 U8 per channel
std::string tolower(std::string string)
Convert string to lower case.
Quantization information.
TensorShape input_shape
Validate test suite is to test ARM_COMPUTE_RETURN_ON_* macros we use to check the validity of given arguments.
const std::string & string_from_data_type(DataType dt)
Convert a data type identity into a string.
Exponential Linear Unit ( f(x) = x for x >= 0; f(x) = alpha * (e^x - 1) for x < 0 )
#define ARM_COMPUTE_UNUSED(...)
To avoid unused variables warnings.
std::string read_file(const std::string &filename, bool binary)
Load an entire file in memory.
Third channel (used by formats with unknown channel types).
1 channel, 1 U32 per channel
std::string float_to_string_with_full_precision(float val)
Create a string with the float in full precision.
Normalization applied within the same map in 1D region.
Channel
Available channels.
Format
Image colour formats.
std::pair< int32_t, int32_t > get_quantized_activation_min_max(ActivationLayerInfo act_info, DataType data_type, UniformQuantizationInfo oq_info)
Returns a pair of minimum and maximum values for a quantized activation.
int8_t quantize_qasymm8_signed(float value, const INFO_TYPE &qinfo, RoundingPolicy rounding_policy=RoundingPolicy::TO_NEAREST_UP)
Quantize a value given a signed 8-bit asymmetric quantization scheme.
quantized, asymmetric fixed-point 8-bit number unsigned
#define ARM_COMPUTE_ERROR_ON_MSG(cond, msg)
std::pair< unsigned int, unsigned int > stride() const
Get the stride.
const std::string & string_from_interpolation_policy(InterpolationPolicy policy)
Translates a given interpolation policy to a string.
bool is_data_type_quantized_asymmetric_signed(DataType dt)
Check if a given data type is of asymmetric quantized signed type.
A 3 plane of 8 bit 4:4:4 sampled Y, U, V planes.
Fourth channel (used by formats with unknown channel types).
unsigned int pad_right() const
Get the right padding.
Quantize using a floating point multiplication.
Padding and stride information class.
void end(TokenStream &in, bool &valid)
GEMMLowpOutputStageType
GEMMLowp output stage type.
Quantize using an integer multiplication.
1 channel, 1 S16 per channel
Output values are determined by averaging the source pixels whose areas fall under the area of the destination pixel, projected onto the source image.
bool has_padding_changed(const std::unordered_map< const ITensorInfo *, PaddingSize > &padding_map)
Check if the previously stored padding info has changed after configuring a kernel.
quantized, symmetric fixed-point 8-bit number
Num samples, channels, height, width.
size_t y() const
Semantic accessor for height as y.
quantized, symmetric per channel fixed-point 8-bit number
A 3 plane of 8-bit 4:2:0 sampled Y, U, V planes.
Lower and Upper Bounded Rectifier ( f(x) = min(a, max(b, x)) )
4 channels, 1 U8 per channel
PoolingType
Available pooling types.
ScaleKernelInfo info(interpolation_policy, default_border_mode, PixelValue(), sampling_policy, false)
const std::string & string_from_data_layout(DataLayout dl)
Convert a data layout identity into a string.
Upper Bounded Rectifier ( f(x) = min(a, x) )
Borders are left undefined.
Pixels outside the image are assumed to have the same value as the closest image pixel.
Class for specifying the size of an image or rectangle.
Num samples, height, width, channels.
2 channel, 1 U8 per channel
ActivationFunction activation() const
Get the type of activation function.
float b() const
Get the beta value.
quantized, asymmetric fixed-point 8-bit number signed
64-bit floating-point number
size_t get_data_layout_dimension_index(const DataLayout data_layout, const DataLayoutDimension data_layout_dimension)
Get the index of the given dimension.
DataType data_type_from_name(const std::string &name)
Convert a string to DataType.
unsigned int pad_bottom() const
Get the bottom padding.
A single plane of 32-bit macro pixel of Y0, U0, Y1, V0 bytes.
DataType
Available data types.
unsigned int pad_left() const
Get the left padding.
DataLayout
[DataLayout enum definition]
const std::string & string_from_pooling_type(PoolingType type)
Translates a given pooling type to a string.
NormType
The normalization type used for the normalization layer.
Normalization applied cross maps.
std::tuple< PixelValue, PixelValue > get_min_max(DataType dt)
Compute the mininum and maximum values a data type can take.
Normalization applied within the same map in 2D region.
PadStrideInfo calculate_same_pad(TensorShape input_shape, TensorShape weights_shape, PadStrideInfo conv_info, DataLayout data_layout=DataLayout::NCHW, const Size2D &dilation=Size2D(1u, 1u), const DimensionRoundingType &rounding_type=DimensionRoundingType::FLOOR)
Calculate padding requirements in case of SAME padding.
const std::string & string_from_format(Format format)
Convert a tensor format into a string.