// Arm Compute Library core utility helpers (Utils.cpp excerpt).

std::string read_file(const std::string &filename, bool binary)
{
    std::string   out;
    std::ifstream fs;
#ifndef ARM_COMPUTE_EXCEPTIONS_DISABLED
    try
    {
#endif /* ARM_COMPUTE_EXCEPTIONS_DISABLED */
        fs.exceptions(std::ifstream::failbit | std::ifstream::badbit);
        std::ios_base::openmode mode = std::ios::in;
        if(binary)
        {
            mode |= std::ios::binary;
        }
        fs.open(filename, mode);
        // Seek to the end to size the output buffer, then rewind and copy the contents.
        fs.seekg(0, std::ios::end);
        out.reserve(fs.tellg());
        fs.seekg(0, std::ios::beg);
        out.assign(std::istreambuf_iterator<char>(fs), std::istreambuf_iterator<char>());
#ifndef ARM_COMPUTE_EXCEPTIONS_DISABLED
    }
    catch(const std::ifstream::failure &e)
    {
        ARM_COMPUTE_ERROR_VAR("Accessing %s: %s", filename.c_str(), e.what());
    }
#endif /* ARM_COMPUTE_EXCEPTIONS_DISABLED */
    return out;
}
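// Illustrative usage sketch (not part of Utils.cpp): read a kernel source as text and a weights
// blob as binary. The file names are hypothetical; the sketch relies on the headers already
// included by this file.
void example_read_file()
{
    const std::string cl_source = read_file("kernels/example.cl", false); // text mode
    const std::string weights   = read_file("data/weights.bin", true);    // binary mode
    ARM_COMPUTE_UNUSED(cl_source, weights);
}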
const std::string &string_from_format(Format format)
{
    static std::map<Format, const std::string> formats_map = { /* Format -> printable name entries (elided) */ };
    return formats_map[format];
}

const std::string &string_from_channel(Channel channel)
{
    static std::map<Channel, const std::string> channels_map = { /* Channel -> printable name entries (elided) */ };
    return channels_map[channel];
}

const std::string &string_from_data_layout(DataLayout dl)
{
    static std::map<DataLayout, const std::string> dl_map = { /* DataLayout -> printable name entries (elided) */ };
    return dl_map[dl];
}

const std::string &string_from_data_type(DataType dt)
{
    static std::map<DataType, const std::string> dt_map = { /* DataType -> printable name entries (elided) */ };
    return dt_map[dt];
}

const std::string &string_from_activation_func(ActivationLayerInfo::ActivationFunction act)
{
    static std::map<ActivationLayerInfo::ActivationFunction, const std::string> act_map = { /* entries (elided) */ };
    return act_map[act];
}

const std::string &string_from_interpolation_policy(InterpolationPolicy policy)
{
    static std::map<InterpolationPolicy, const std::string> interpolation_policy_map = { /* entries (elided) */ };
    return interpolation_policy_map[policy];
}

const std::string &string_from_border_mode(BorderMode border_mode)
{
    static std::map<BorderMode, const std::string> border_mode_map = { /* entries (elided) */ };
    return border_mode_map[border_mode];
}

const std::string &string_from_norm_type(NormType type)
{
    static std::map<NormType, const std::string> norm_type_map = { /* entries (elided) */ };
    return norm_type_map[type];
}

const std::string &string_from_pooling_type(PoolingType type)
{
    static std::map<PoolingType, const std::string> pool_type_map = { /* entries (elided) */ };
    return pool_type_map[type];
}
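// Illustrative usage sketch: the string_from_* helpers are typically used when composing log and
// error messages. The exact strings returned depend on the map entries elided above.
void example_enum_to_string(std::ostream &os)
{
    os << "format: "    << string_from_format(Format::NV12) << "\n";
    os << "data type: " << string_from_data_type(DataType::QASYMM8) << "\n";
    os << "layout: "    << string_from_data_layout(DataLayout::NHWC) << "\n";
}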
bool is_pool_region_entirely_outside_input(const PoolingLayerInfo &info)
{
    if(info.is_global_pooling || info.exclude_padding || info.pool_size.x() == 0 || info.pool_size.y() == 0)
    {
        return false;
    }
    const auto ps                = info.pad_stride_info;
    const auto pool_le_padding_x = info.pool_size.x() <= std::max({ ps.pad_left(), ps.pad_right() });
    const auto pool_le_padding_y = info.pool_size.y() <= std::max({ ps.pad_top(), ps.pad_bottom() });
    return pool_le_padding_x || pool_le_padding_y;
}
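// Illustrative example: with a 2x2 pool, stride 1 and padding of 2 on every side, the pool window
// fits entirely inside the padding (2 <= max(2, 2)), so the check returns true. The
// PoolingLayerInfo constructor used here is a sketch of one of the struct's overloads.
bool example_degenerate_pool_region()
{
    const PoolingLayerInfo pool_info(PoolingType::AVG, Size2D(2, 2), DataLayout::NHWC,
                                     PadStrideInfo(1, 1, 2, 2, DimensionRoundingType::FLOOR));
    return is_pool_region_entirely_outside_input(pool_info); // expected: true
}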
const std::string &string_from_gemmlowp_output_stage(GEMMLowpOutputStageType output_stage)
{
    static std::map<GEMMLowpOutputStageType, const std::string> output_stage_map = { /* entries (elided) */ };
    return output_stage_map[output_stage];
}
std::string string_from_pixel_value(const PixelValue &value, const DataType data_type)
{
    std::stringstream ss;
    std::string       converted_string;

    switch(data_type)
    {
        case DataType::U8:
        case DataType::QASYMM8:
            // Needs conversion to 32 bit, otherwise interpreted as ASCII values
            ss << uint32_t(value.get<uint8_t>());
            converted_string = ss.str();
            break;
        case DataType::S8:
        case DataType::QASYMM8_SIGNED:
            // Needs conversion to 32 bit, otherwise interpreted as ASCII values
            ss << int32_t(value.get<int8_t>());
            converted_string = ss.str();
            break;
        case DataType::U16:
        case DataType::QASYMM16:
            ss << value.get<uint16_t>();
            converted_string = ss.str();
            break;
        case DataType::S16:
        case DataType::QSYMM16:
            ss << value.get<int16_t>();
            converted_string = ss.str();
            break;
        case DataType::U32:
            ss << value.get<uint32_t>();
            converted_string = ss.str();
            break;
        case DataType::S32:
            ss << value.get<int32_t>();
            converted_string = ss.str();
            break;
        case DataType::F32:
            converted_string = float_to_string_with_full_precision(value.get<float>());
            break;
        case DataType::F16:
            static_assert(sizeof(half) == 2, "Half must be 16 bit");
            ss << value.get<half>();
            converted_string = ss.str();
            break;
        default:
            ARM_COMPUTE_ERROR("Not handled");
    }
    return converted_string;
}
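// Illustrative usage sketch: formatting border or constant values for messages. PixelValue is
// constructed here through its per-type constructors.
void example_pixel_value_to_string(std::ostream &os)
{
    os << string_from_pixel_value(PixelValue(uint8_t(255)), DataType::U8) << "\n"; // "255", not an ASCII char
    os << string_from_pixel_value(PixelValue(1.5f), DataType::F32) << "\n";
}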
DataType data_type_from_name(const std::string &name)
{
    static const std::map<std::string, DataType> data_types = { /* lower-case name -> DataType entries (elided) */ };
    // The lookup is wrapped in a try/catch unless ARM_COMPUTE_EXCEPTIONS_DISABLED is defined, so an
    // unknown name surfaces as std::out_of_range and is reported as an error.
    return data_types.at(lower_string(name));
}
std::string lower_string(const std::string &val)
{
    std::string res = val;
    std::transform(res.begin(), res.end(), res.begin(), ::tolower);
    return res;
}

std::string upper_string(const std::string &val)
{
    std::string res = val;
    std::transform(res.begin(), res.end(), res.begin(), ::toupper);
    return res;
}
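// Illustrative usage sketch: data_type_from_name() is effectively case-insensitive because the
// input is passed through lower_string() first (assuming the elided map contains an "f16" entry).
void example_name_to_data_type()
{
    const DataType dt = data_type_from_name("F16"); // same as data_type_from_name("f16")
    ARM_COMPUTE_UNUSED(dt);
    ARM_COMPUTE_ERROR_ON(upper_string("qasymm8") != "QASYMM8");
}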
PadStrideInfo calculate_same_pad(TensorShape input_shape, TensorShape weights_shape, PadStrideInfo conv_info,
                                 DataLayout data_layout, const Size2D &dilation, const DimensionRoundingType &rounding_type)
{
    const auto &strides = conv_info.stride();
    ARM_COMPUTE_ERROR_ON_MSG((strides.first < 1 || strides.second < 1), "Stride values should be greater than or equal to 1.");

    const unsigned int width_idx     = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const unsigned int height_idx    = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const unsigned int in_width      = input_shape[width_idx];
    const unsigned int in_height     = input_shape[height_idx];
    const unsigned int kernel_width  = weights_shape[width_idx];
    const unsigned int kernel_height = weights_shape[height_idx];

    // Calculate output dimensions
    const auto         is_ceil    = static_cast<unsigned int>(rounding_type == DimensionRoundingType::CEIL);
    const unsigned int out_width  = ((in_width - is_ceil) + strides.first - 1) / strides.first + is_ceil;
    const unsigned int out_height = ((in_height - is_ceil) + strides.second - 1) / strides.second + is_ceil;

    // Calculate effective weights sizes
    const int real_weight_width  = (kernel_width - 1) * dilation.x() + 1;
    const int real_weight_height = (kernel_height - 1) * dilation.y() + 1;

    // Calculate total pad
    const int pad_width  = std::max(0, static_cast<int>((out_width - 1) * strides.first + real_weight_width - in_width));
    const int pad_height = std::max(0, static_cast<int>((out_height - 1) * strides.second + real_weight_height - in_height));

    // Calculate individual paddings
    const unsigned int pad_left   = pad_width / 2;
    const unsigned int pad_top    = pad_height / 2;
    const unsigned int pad_right  = pad_width - pad_left;
    const unsigned int pad_bottom = pad_height - pad_top;

    PadStrideInfo same_info(strides.first, strides.second, pad_left, pad_right, pad_top, pad_bottom, rounding_type);

    // Sanity check: the computed padding must reproduce the expected output dimensions
    const auto out_dims = scaled_dimensions(in_width, in_height, kernel_width, kernel_height, same_info, dilation);
    ARM_COMPUTE_ERROR_ON(out_dims.first != out_width || out_dims.second != out_height);
    ARM_COMPUTE_UNUSED(out_dims);

    return same_info;
}
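// Worked example (illustrative): a 10x10 NCHW input, 3x3 kernel, stride 2, no dilation.
// out = (10 + 2 - 1) / 2 = 5, total pad = (5 - 1) * 2 + 3 - 10 = 1, split as left 0 / right 1
// (and likewise top 0 / bottom 1).
void example_same_padding()
{
    const TensorShape   input_shape(10U, 10U, 3U);     // W, H, C for NCHW
    const TensorShape   weights_shape(3U, 3U, 3U, 8U); // W, H, C, N
    const PadStrideInfo conv_info(2, 2, 0, 0);
    const PadStrideInfo same = calculate_same_pad(input_shape, weights_shape, conv_info);
    ARM_COMPUTE_ERROR_ON(same.pad_right() != 1 || same.pad_bottom() != 1);
}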
std::pair<unsigned int, unsigned int> deconvolution_output_dimensions(unsigned int in_width, unsigned int in_height,
                                                                      unsigned int kernel_width, unsigned int kernel_height,
                                                                      const PadStrideInfo &pad_stride_info)
{
    const unsigned int pad_left   = pad_stride_info.pad_left();
    const unsigned int pad_top    = pad_stride_info.pad_top();
    const unsigned int pad_right  = pad_stride_info.pad_right();
    const unsigned int pad_bottom = pad_stride_info.pad_bottom();
    const unsigned int stride_x   = pad_stride_info.stride().first;
    const unsigned int stride_y   = pad_stride_info.stride().second;

    ARM_COMPUTE_ERROR_ON(in_width < 1 || in_height < 1);
    ARM_COMPUTE_ERROR_ON(((in_width - 1) * stride_x + kernel_width) < (pad_left + pad_right));
    ARM_COMPUTE_ERROR_ON(((in_height - 1) * stride_y + kernel_height) < (pad_top + pad_bottom));
    const int w = stride_x * (in_width - 1) + kernel_width - (pad_left + pad_right);
    const int h = stride_y * (in_height - 1) + kernel_height - (pad_top + pad_bottom);

    return std::make_pair<unsigned int, unsigned int>(w, h);
}
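// Worked example (illustrative): upsampling a 4x4 input with a 3x3 kernel, stride 2 and no
// padding gives w = 2 * (4 - 1) + 3 = 9 (and the same for h).
void example_deconvolution_output()
{
    const auto out = deconvolution_output_dimensions(4U, 4U, 3U, 3U, PadStrideInfo(2, 2, 0, 0));
    ARM_COMPUTE_ERROR_ON(out.first != 9U || out.second != 9U);
}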
std::pair<unsigned int, unsigned int> scaled_dimensions(int width, int height, int kernel_width, int kernel_height,
                                                        const PadStrideInfo &pad_stride_info, const Size2D &dilation)
{
    const int dilation_x = dilation.x();
    const int dilation_y = dilation.y();
    const int pad_left   = pad_stride_info.pad_left();
    const int pad_top    = pad_stride_info.pad_top();
    const int pad_right  = pad_stride_info.pad_right();
    const int pad_bottom = pad_stride_info.pad_bottom();
    const int stride_x   = pad_stride_info.stride().first;
    const int stride_y   = pad_stride_info.stride().second;

    int w = 0;
    int h = 0;
    switch(pad_stride_info.round())
    {
        case DimensionRoundingType::FLOOR:
            w = static_cast<int>(std::floor((static_cast<float>(width + pad_left + pad_right - (dilation_x * (kernel_width - 1) + 1)) / stride_x) + 1));
            h = static_cast<int>(std::floor((static_cast<float>(height + pad_top + pad_bottom - (dilation_y * (kernel_height - 1) + 1)) / stride_y) + 1));
            break;
        case DimensionRoundingType::CEIL:
            w = static_cast<int>(std::ceil((static_cast<float>(width + pad_left + pad_right - (dilation_x * (kernel_width - 1) + 1)) / stride_x) + 1));
            h = static_cast<int>(std::ceil((static_cast<float>(height + pad_top + pad_bottom - (dilation_y * (kernel_height - 1) + 1)) / stride_y) + 1));
            break;
        default:
            ARM_COMPUTE_ERROR("Unsupported rounding type");
    }

    w = std::max(1, w);
    h = std::max(1, h);
    return std::make_pair(static_cast<unsigned int>(w), static_cast<unsigned int>(h));
}
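// Worked example (illustrative): 224x224 input, 3x3 kernel, stride 2, pad 1, FLOOR rounding:
// w = floor((224 + 1 + 1 - 3) / 2 + 1) = floor(112.5) = 112 (and the same for h).
void example_scaled_dimensions()
{
    const auto out = scaled_dimensions(224, 224, 3, 3, PadStrideInfo(2, 2, 1, 1, DimensionRoundingType::FLOOR));
    ARM_COMPUTE_ERROR_ON(out.first != 112U || out.second != 112U);
}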
std::pair<int, int> scaled_dimensions_signed(int width, int height, int kernel_width, int kernel_height,
                                             const PadStrideInfo &pad_stride_info)
{
    const int pad_left   = pad_stride_info.pad_left();
    const int pad_top    = pad_stride_info.pad_top();
    const int pad_right  = pad_stride_info.pad_right();
    const int pad_bottom = pad_stride_info.pad_bottom();
    const int stride_x   = pad_stride_info.stride().first;
    const int stride_y   = pad_stride_info.stride().second;

    int w = 0;
    int h = 0;
    switch(pad_stride_info.round())
    {
        case DimensionRoundingType::FLOOR:
            w = static_cast<int>(std::floor((static_cast<float>(width + pad_left + pad_right - kernel_width) / stride_x) + 1));
            h = static_cast<int>(std::floor((static_cast<float>(height + pad_top + pad_bottom - kernel_height) / stride_y) + 1));
            break;
        case DimensionRoundingType::CEIL:
            w = static_cast<int>(std::ceil((static_cast<float>(width + pad_left + pad_right - kernel_width) / stride_x) + 1));
            h = static_cast<int>(std::ceil((static_cast<float>(height + pad_top + pad_bottom - kernel_height) / stride_y) + 1));
            break;
        default:
            ARM_COMPUTE_ERROR("Unsupported rounding type");
    }

    return std::make_pair(static_cast<int>(w), static_cast<int>(h));
}
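// Illustrative contrast with scaled_dimensions(): the signed variant does not clamp the result,
// so an oversized kernel yields a non-positive dimension instead of 1, which callers can use to
// reject invalid configurations.
void example_scaled_dimensions_signed()
{
    const auto out = scaled_dimensions_signed(2, 2, 5, 5, PadStrideInfo(1, 1, 0, 0));
    ARM_COMPUTE_ERROR_ON(out.first > 0 || out.second > 0); // here both are -2
}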
bool needs_serialized_reduction(ReductionOperation op, DataType dt, unsigned int axis)
{
    const bool is_min_max        = (op == ReductionOperation::MAX || op == ReductionOperation::MIN);
    const bool is_quantized_type = is_data_type_quantized(dt);
    const bool is_first_dim      = (axis == 0);
    return !is_first_dim || is_min_max || is_quantized_type;
}
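// Illustrative usage sketch: a SUM along the innermost axis of an F32 tensor can be parallelised,
// while MAX on the same axis (like any quantized or non-innermost reduction) must be serialized.
void example_serialized_reduction()
{
    ARM_COMPUTE_ERROR_ON(needs_serialized_reduction(ReductionOperation::SUM, DataType::F32, 0));
    ARM_COMPUTE_ERROR_ON(!needs_serialized_reduction(ReductionOperation::MAX, DataType::F32, 0));
}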
std::pair<int32_t, int32_t> get_quantized_activation_min_max(ActivationLayerInfo act_info, DataType data_type, UniformQuantizationInfo oq_info)
{
    const bool is_qasymm8_signed = is_data_type_quantized_asymmetric_signed(data_type);
    const auto a                 = act_info.a();
    const auto b                 = act_info.b();
    const int  a_int             = is_qasymm8_signed ? quantize_qasymm8_signed(a, oq_info) : quantize_qasymm8(a, oq_info);
    const int  b_int             = is_qasymm8_signed ? quantize_qasymm8_signed(b, oq_info) : quantize_qasymm8(b, oq_info);
    const auto type_max_value    = std::get<1>(get_min_max(data_type)).get<int32_t>();

    const int32_t min_activation = act_info.activation() != ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU ? oq_info.offset : b_int;
    const int32_t max_activation = act_info.activation() == ActivationLayerInfo::ActivationFunction::RELU ? type_max_value : a_int;

    return std::make_pair(min_activation, max_activation);
}
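// Illustrative usage sketch: clamp bounds for a bounded ReLU on QASYMM8 data, expressed in the
// output tensor's quantized domain. The alpha/beta values chosen here are arbitrary.
void example_quantized_activation_range(const UniformQuantizationInfo &oq_info)
{
    const ActivationLayerInfo act(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f, 0.f);
    const auto minmax = get_quantized_activation_min_max(act, DataType::QASYMM8, oq_info);
    ARM_COMPUTE_UNUSED(minmax); // minmax.first / minmax.second are the int32 clamp values
}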
std::unordered_map<const ITensorInfo *, PaddingSize> get_padding_info(std::initializer_list<const ITensor *> tensors)
{
    std::unordered_map<const ITensorInfo *, PaddingSize> res;
    for(const ITensor *tensor : tensors)
    {
        if(tensor != nullptr)
        {
            res.insert({ tensor->info(), tensor->info()->padding() });
        }
    }
    return res;
}

std::unordered_map<const ITensorInfo *, PaddingSize> get_padding_info(std::initializer_list<const ITensorInfo *> infos)
{
    std::unordered_map<const ITensorInfo *, PaddingSize> res;
    for(const ITensorInfo *info : infos)
    {
        if(info != nullptr)
        {
            res.insert({ info, info->padding() });
        }
    }
    return res;
}
bool has_padding_changed(const std::unordered_map<const ITensorInfo *, PaddingSize> &padding_map)
{
    return std::find_if(padding_map.begin(), padding_map.end(), [](const std::pair<const ITensorInfo *, PaddingSize> &padding_info)
    {
        return (padding_info.first->padding() != padding_info.second);
    })
    != padding_map.end();
}
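// Illustrative usage sketch: record the padding of the tensors involved before configuring a
// kernel, then verify that configuration did not alter it. The configure step is omitted here.
void example_padding_guard(const ITensorInfo *src, const ITensorInfo *dst)
{
    const auto padding_map = get_padding_info({ src, dst });
    // ... configure a kernel that uses src and dst ...
    ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_map));
}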
#ifdef ARM_COMPUTE_ASSERTS_ENABLED
void print_consecutive_elements(std::ostream &s, DataType dt, const uint8_t *ptr, unsigned int n, int stream_width, const std::string &element_delim)
{
    switch(dt)
    {
        case DataType::U8:
        case DataType::QASYMM8:
            print_consecutive_elements_impl<uint8_t>(s, ptr, n, stream_width, element_delim);
            break;
        case DataType::S8:
        case DataType::QSYMM8:
        case DataType::QASYMM8_SIGNED:
        case DataType::QSYMM8_PER_CHANNEL:
            print_consecutive_elements_impl<int8_t>(s, reinterpret_cast<const int8_t *>(ptr), n, stream_width, element_delim);
            break;
        case DataType::U16:
        case DataType::QASYMM16:
            print_consecutive_elements_impl<uint16_t>(s, reinterpret_cast<const uint16_t *>(ptr), n, stream_width, element_delim);
            break;
        case DataType::S16:
        case DataType::QSYMM16:
            print_consecutive_elements_impl<int16_t>(s, reinterpret_cast<const int16_t *>(ptr), n, stream_width, element_delim);
            break;
        case DataType::U32:
            print_consecutive_elements_impl<uint32_t>(s, reinterpret_cast<const uint32_t *>(ptr), n, stream_width, element_delim);
            break;
        case DataType::S32:
            print_consecutive_elements_impl<int32_t>(s, reinterpret_cast<const int32_t *>(ptr), n, stream_width, element_delim);
            break;
        case DataType::BFLOAT16:
            print_consecutive_elements_impl<bfloat16>(s, reinterpret_cast<const bfloat16 *>(ptr), n, stream_width, element_delim);
            break;
        case DataType::F16:
            print_consecutive_elements_impl<half>(s, reinterpret_cast<const half *>(ptr), n, stream_width, element_delim);
            break;
        case DataType::F32:
            print_consecutive_elements_impl<float>(s, reinterpret_cast<const float *>(ptr), n, stream_width, element_delim);
            break;
        default:
            ARM_COMPUTE_ERROR("Undefined element size for given data type");
    }
}
int max_consecutive_elements_display_width(std::ostream &s, DataType dt, const uint8_t *ptr, unsigned int n)
{
    switch(dt)
    {
        case DataType::U8:
        case DataType::QASYMM8:
            return max_consecutive_elements_display_width_impl<uint8_t>(s, ptr, n);
        case DataType::S8:
        case DataType::QSYMM8:
        case DataType::QASYMM8_SIGNED:
        case DataType::QSYMM8_PER_CHANNEL:
            return max_consecutive_elements_display_width_impl<int8_t>(s, reinterpret_cast<const int8_t *>(ptr), n);
        case DataType::U16:
        case DataType::QASYMM16:
            return max_consecutive_elements_display_width_impl<uint16_t>(s, reinterpret_cast<const uint16_t *>(ptr), n);
        case DataType::S16:
        case DataType::QSYMM16:
            return max_consecutive_elements_display_width_impl<int16_t>(s, reinterpret_cast<const int16_t *>(ptr), n);
        case DataType::U32:
            return max_consecutive_elements_display_width_impl<uint32_t>(s, reinterpret_cast<const uint32_t *>(ptr), n);
        case DataType::S32:
            return max_consecutive_elements_display_width_impl<int32_t>(s, reinterpret_cast<const int32_t *>(ptr), n);
        case DataType::BFLOAT16:
            return max_consecutive_elements_display_width_impl<bfloat16>(s, reinterpret_cast<const bfloat16 *>(ptr), n);
        case DataType::F16:
            return max_consecutive_elements_display_width_impl<half>(s, reinterpret_cast<const half *>(ptr), n);
        case DataType::F32:
            return max_consecutive_elements_display_width_impl<float>(s, reinterpret_cast<const float *>(ptr), n);
        default:
            ARM_COMPUTE_ERROR("Undefined element size for given data type");
    }
    return 0;
}
#endif /* ARM_COMPUTE_ASSERTS_ENABLED */
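// Illustrative usage sketch (only meaningful when ARM_COMPUTE_ASSERTS_ENABLED is defined):
// pretty-print a small buffer, sizing the column width from its widest element.
#ifdef ARM_COMPUTE_ASSERTS_ENABLED
void example_print_elements(std::ostream &os)
{
    const int32_t data[4] = { 1, -20, 300, 7 };
    const auto   *ptr     = reinterpret_cast<const uint8_t *>(data);
    const int     width   = max_consecutive_elements_display_width(os, DataType::S32, ptr, 4);
    print_consecutive_elements(os, DataType::S32, ptr, 4, width, " ");
}
#endif /* ARM_COMPUTE_ASSERTS_ENABLED */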