23.08
|
Go to the documentation of this file.
44 Status
validate_arguments(
const ITensorInfo *
input,
const ITensorInfo *input_squared,
const ITensorInfo *output,
const NormalizationLayerInfo &norm_info)
55 if(output->total_size() != 0)
68 : _func(nullptr), _input(nullptr), _input_squared(nullptr), _output(nullptr), _norm_info(
NormType::
IN_MAP_1D)
84 _input_squared = input_squared;
86 _norm_info = norm_info;
98 _func = &NENormalizationLayerKernel::normalize_float<float, 4, 0, true>;
102 _func = &NENormalizationLayerKernel::normalize_float<float, 4, 0, false>;
109 _func = &NENormalizationLayerKernel::normalize_float<float, 4, 1, true>;
113 _func = &NENormalizationLayerKernel::normalize_float<float, 4, 1, false>;
117 _func = &NENormalizationLayerKernel::normalize_float<float, 4, 2, false>;
124 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
133 _func = &NENormalizationLayerKernel::normalize_float<float16_t, 8, 0, true>;
137 _func = &NENormalizationLayerKernel::normalize_float<float16_t, 8, 0, false>;
144 _func = &NENormalizationLayerKernel::normalize_float<float16_t, 8, 1, true>;
148 _func = &NENormalizationLayerKernel::normalize_float<float16_t, 8, 1, false>;
152 _func = &NENormalizationLayerKernel::normalize_float<float16_t, 8, 2, false>;
166 INEKernel::configure(win);
169 template <
typename T,
unsigned int S,
unsigned int dim,
bool do_2D_norm>
170 void NENormalizationLayerKernel::normalize_float(
const Window &window)
178 const auto window_start_x =
static_cast<int>(
window.
x().
start());
179 const auto window_end_x =
static_cast<int>(
window.
x().
end());
180 const int window_step_x = S;
183 Iterator input_squared(_input_squared, win);
187 const int radius = _norm_info.
norm_size() / 2;
199 auto sequential_normalization = [&](
const int x,
const Coordinates & id,
const int current_row,
const int first_row,
const int last_row,
const T * input_ptr,
const uint8_t *input_squared_start_ptr,
202 const int current_slice = dim == 0 ? x :
id[dim];
203 const int first_slice = std::max(current_slice - radius, 0);
204 const int last_slice = std::min(current_slice + radius, max_right);
206 const uint8_t *
const input_squared_x_ptr = input_squared_start_ptr + x * input_squared_stride_x;
208 auto accu =
static_cast<T
>(0.f);
209 for(
int j = first_row; j <= last_row; ++j)
212 const uint8_t *
const input_squared_ptr = input_squared_x_ptr + (j - current_row) * input_squared_stride_row;
213 for(
int i = first_slice; i <= last_slice; ++i)
215 accu += *
reinterpret_cast<const T *
>(input_squared_ptr + (i - current_slice) * input_squared_stride_slice);
220 const auto normalized = std::pow(accu *
static_cast<T
>(_norm_info.
scale_coeff()) +
static_cast<T
>(_norm_info.
kappa()), _norm_info.
beta());
221 const auto normalized_pixel = (*(input_ptr + x)) / normalized;
222 *(output_ptr + x) = normalized_pixel;
227 const auto input_ptr =
reinterpret_cast<const T *
>(
input.ptr());
228 auto output_ptr =
reinterpret_cast<T *
>(output.ptr());
231 const int current_row = do_2D_norm ?
id[dim_y] : 0;
232 const int first_row = do_2D_norm ? std::max(current_row - radius, 0) : 0;
233 const int last_row = do_2D_norm ? std::min(current_row + radius, max_bottom) : 0;
235 int x = window_start_x;
237 for(; x < radius && x < window_end_x && dim == 0; ++x)
239 sequential_normalization(x,
id, current_row, first_row, last_row, input_ptr, input_squared.ptr(), output_ptr);
243 for(; x <= window_end_x - window_step_x - radius; x += window_step_x)
245 const int current_slice = dim == 0 ? x :
id[dim];
246 const int first_slice = std::max(current_slice - radius, 0);
247 const int last_slice = std::min(current_slice + radius, max_right);
249 const uint8_t *
const input_squared_x_ptr = input_squared.ptr() + x * input_squared_stride_x;
252 for(
int j = first_row; j <= last_row; ++j)
255 const uint8_t *
const input_squared_ptr = input_squared_x_ptr + (j - current_row) * input_squared_stride_row;
256 for(
int i = first_slice; i <= last_slice; ++i)
258 accu =
wrapper::vadd(accu,
wrapper::vloadq(
reinterpret_cast<const T *
>(input_squared_ptr + (i - current_slice) * input_squared_stride_slice)));
265 wrapper::vstore(
reinterpret_cast<T *
>(output_ptr + x), normalized_pixel);
269 for(; x < window_end_x; ++x)
271 sequential_normalization(x,
id, current_row, first_row, last_row, input_ptr, input_squared.ptr(), output_ptr);
274 input, input_squared, output);
@ NCHW
Num samples, channels, height, width.
Class to describe a number of elements in each dimension.
uint8x8_t vadd(const uint8x8_t &a, const uint8x8_t &b)
virtual DataLayout data_layout() const =0
Get the data layout of the tensor.
constexpr int start() const
Return the start of the dimension.
float kappa() const
Get the kappa value.
Window calculate_max_window(const ValidRegion &valid_region, const Steps &steps, bool skip_border, BorderSize border_size)
@ IN_MAP_1D
Normalization applied within the same map in 1D region.
static Status validate(const ITensorInfo *input, const ITensorInfo *input_squared, const ITensorInfo *output, NormalizationLayerInfo norm_info)
Static function to check if given info will lead to a valid configuration of NENormalizationLayerKernel.
Status validate_arguments(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *dst, const PadStrideInfo &conv_info)
#define ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(k)
#define ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(...)
static constexpr size_t DimX
Alias for dimension 0 also known as X dimension.
#define ARM_COMPUTE_ERROR(msg)
Print the given message then throw an std::runtime_error.
float32x2_t vinv(const float32x2_t &a)
float scale_coeff() const
Return the scaling factor of the normalization function.
Interface for CPU tensor.
void run(const Window &window, const ThreadInfo &info) override
Execute the kernel on the passed window.
uint8x16_t vloadq(const uint8_t *ptr)
Includes all wrapper headers at once.
#define ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(...)
float32x4_t vpow(const float32x4_t &a, const float32x4_t &b)
#define ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(t, c,...)
#define ARM_COMPUTE_RETURN_ON_ERROR(status)
Checks if a status contains an error and returns it.
virtual size_t dimension(size_t index) const =0
Return the size of the requested dimension.
Create the appropriate SIMD vector given its type and size in terms of elements.
#define ARM_COMPUTE_ERROR_ON_NULLPTR(...)
virtual ITensorInfo * info() const =0
Interface to be implemented by the child class to return the tensor's metadata.
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
#define ARM_COMPUTE_ERROR_THROW_ON(status)
void configure(const ITensor *input, const ITensor *input_squared, ITensor *output, NormalizationLayerInfo norm_info)
Set the input and output tensors.
uint8x8_t vmul(const uint8x8_t &a, const uint8x8_t &b)
Iterator updated by execute_window_loop for each window element.
#define ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(tensor)
bool auto_init_if_empty(ITensorInfo &info, const TensorShape &shape, int num_channels, DataType data_type, QuantizationInfo quantization_info=QuantizationInfo())
Auto initialize the tensor info (shape, number of channels and data type) if the current assignment is empty.
uint32_t norm_size() const
Get the normalization size.
#define ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(f, s)
virtual DataType data_type() const =0
Data type used for each element of the tensor.
#define ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(...)
@ IN_MAP_2D
Normalization applied within the same map in 2D region.
uint8x8_t vmla(const uint8x8_t &a, const uint8x8_t &b, const uint8x8_t &c)
#define ARM_COMPUTE_UNUSED(...)
To avoid unused variables warnings.
Describe one of the image's dimensions with a start, end and step.
const Window & window() const
The maximum window the kernel can be executed on.
Information about executing thread and CPU.
void vstore(uint8_t *ptr, uint8x8_t val)
Describe a multidimensional execution window.
Normalization Layer Information class.
#define ARM_COMPUTE_RETURN_ERROR_ON_MSG(cond, msg)
If the condition is true, an error is returned.
Copyright (c) 2017-2023 Arm Limited.
@ F16
16-bit floating-point number
virtual const Strides & strides_in_bytes() const =0
The strides in bytes for accessing each dimension of the tensor.
#define ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(...)
Store the tensor's metadata.
float beta() const
Get the beta value.
@ F32
32-bit floating-point number
NormType
The normalization type used for the normalization layer.
ScaleKernelInfo info(interpolation_policy, default_border_mode, PixelValue(), sampling_policy, false)
unsigned int get_normalization_dimension_index(DataLayout layout, const NormalizationLayerInfo &info)
Calculate the normalization dimension index for a given normalization type.
void execute_window_loop(const Window &w, L &&lambda_function, Ts &&... iterators)
Iterate through the passed window, automatically adjusting the iterators and calling the lambda_function for each element.
constexpr int end() const
Return the end of the dimension.
constexpr const Dimension & x() const
Alias to access the first dimension of the window.
NormType type() const
Get the normalization type.
NENormalizationLayerKernel()
Default constructor.
uint8x8_t vdup_n(uint8_t value, traits::vector_64_tag)