struct BatchNormalizationSelectorData
{
    DataType       dt;
    const CPUInfo &ci;
};

using BatchNormalizationSelectorPtr = std::add_pointer<bool(const BatchNormalizationSelectorData &data)>::type;
using BatchNormalizationKernelPtr   = std::add_pointer<void(ITensor *, ITensor *, const ITensor *, const ITensor *, const ITensor *, const ITensor *,
                                                            float, ActivationLayerInfo &, const Window &)>::type;

struct BatchNormalizationKernel
{
    const char                          *name;
    const BatchNormalizationSelectorPtr  is_selected;
    BatchNormalizationKernelPtr          ukernel;
};
static const BatchNormalizationKernel available_kernels[] =
{
#if defined(ARM_COMPUTE_ENABLE_SVE)
    {
        "sve_fp16_batch_normalization",
        [](const BatchNormalizationSelectorData &data) { return data.dt == DataType::F16 && data.ci.has_sve(); },
        REGISTER_FP16_SVE(arm_compute::cpu::fp16_sve_batch_normalization)
    },
    {
        "sve_fp32_batch_normalization",
        [](const BatchNormalizationSelectorData &data) { return data.dt == DataType::F32 && data.ci.has_sve(); },
        REGISTER_FP32_SVE(arm_compute::cpu::fp32_sve_batch_normalization)
    },
#endif // defined(ARM_COMPUTE_ENABLE_SVE)
#if defined(ARM_COMPUTE_ENABLE_NEON)
#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
    {
        "neon_fp16_batch_normalization",
        [](const BatchNormalizationSelectorData &data) { return data.dt == DataType::F16; },
        REGISTER_FP16_NEON(arm_compute::cpu::fp16_neon_batch_normalization)
    },
#endif // defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
    {
        "neon_fp32_batch_normalization",
        [](const BatchNormalizationSelectorData &data) { return data.dt == DataType::F32; },
        REGISTER_FP32_NEON(arm_compute::cpu::fp32_neon_batch_normalization)
    },
#endif // defined(ARM_COMPUTE_ENABLE_NEON)
};

const BatchNormalizationKernel *get_implementation(const BatchNormalizationSelectorData &data)
{
    for(const auto &uk : available_kernels)
    {
        if(uk.is_selected(data))
        {
            return &uk;
        }
    }
    return nullptr;
}
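
// Illustrative sketch (not part of the original file): how the table above is
// consumed. get_implementation() returns the first entry whose predicate
// matches, so for an F32 tensor on a NEON-only CPU (no SVE) the selection
// resolves to "neon_fp32_batch_normalization":
//
//   const auto *uk = get_implementation(BatchNormalizationSelectorData{ DataType::F32, CPUInfo::get() });
//   if(uk != nullptr)
//   {
//       // uk->name == "neon_fp32_batch_normalization"; uk->ukernel is the function to call
//   }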
Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const ITensorInfo *mean, const ITensorInfo *var,
                          const ITensorInfo *beta, const ITensorInfo *gamma, float epsilon, ActivationLayerInfo act_info)
{
    ARM_COMPUTE_UNUSED(epsilon);

    const auto *uk = get_implementation(BatchNormalizationSelectorData{ input->data_type(), CPUInfo::get() });
    ARM_COMPUTE_RETURN_ERROR_ON(uk == nullptr || uk->ukernel == nullptr);

    if(act_info.enabled())
    {
        const ActivationLayerInfo::ActivationFunction act = act_info.activation();
        ARM_COMPUTE_RETURN_ERROR_ON(act != ActivationLayerInfo::ActivationFunction::RELU
                                    && act != ActivationLayerInfo::ActivationFunction::BOUNDED_RELU
                                    && act != ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU);
    }

    if(nullptr != output)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output);
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, output);
    }
    // (shape/type checks on mean, var, beta and gamma omitted from this excerpt)

    return Status{};
}
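
// Illustrative sketch (assumption: the public static validate() forwards to
// validate_arguments() above, as its documented signature suggests). A caller
// can pre-check a configuration without constructing the kernel:
//
//   const Status st = NEBatchNormalizationLayerKernel::validate(src.info(), dst.info(),
//                                                               mean.info(), var.info());
//   ARM_COMPUTE_ERROR_THROW_ON(st); // fails for non-F16/F32 types or unsupported fused activations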
template <typename T, bool fused_activation, typename F>
void NEBatchNormalizationLayerKernel::batch_normalization_nchw(const Window &window)
{
    /** SIMD vector tag type. */
    using ExactTagType = typename wrapper::traits::neon_bitvector_tag_t<T, wrapper::traits::BitWidth::W128>;

    const int  window_step_x  = 16 / sizeof(T);
    const auto window_start_x = static_cast<int>(window.x().start());
    const auto window_end_x   = static_cast<int>(window.x().end());

    Window win_to_use = window;
    win_to_use.set(Window::DimX, Window::Dimension(0, 1, 1));

    Iterator input(_input, win_to_use);
    Iterator output(_output, win_to_use);

    F activation_functor(_act_info);

    // Per-feature-map parameters; gamma and beta are optional
    const auto input_mean  = reinterpret_cast<const T *>(_mean->ptr_to_element(Coordinates(0, 0)));
    const auto input_var   = reinterpret_cast<const T *>(_var->ptr_to_element(Coordinates(0, 0)));
    const auto input_gamma = (_gamma != nullptr) ? reinterpret_cast<const T *>(_gamma->ptr_to_element(Coordinates(0, 0))) : nullptr;
    const auto input_beta  = (_beta != nullptr) ? reinterpret_cast<const T *>(_beta->ptr_to_element(Coordinates(0, 0))) : nullptr;

    T mean        = static_cast<T>(0);
    T var         = static_cast<T>(0);
    T gamma       = static_cast<T>(1);
    T beta        = static_cast<T>(0);
    T denominator = static_cast<T>(0);

    auto       mean_vec        = wrapper::vdup_n(mean, ExactTagType{});
    auto       var_vec         = wrapper::vdup_n(var, ExactTagType{});
    auto       gamma_vec       = wrapper::vdup_n(gamma, ExactTagType{});
    auto       beta_vec        = wrapper::vdup_n(beta, ExactTagType{});
    auto       denominator_vec = wrapper::vdup_n(denominator, ExactTagType{});
    const auto epsilon_vec     = wrapper::vdup_n(static_cast<T>(_epsilon), ExactTagType{});

    execute_window_loop(win_to_use, [&](const Coordinates &id)
    {
        const auto input_ptr  = reinterpret_cast<const T *>(input.ptr());
        const auto output_ptr = reinterpret_cast<T *>(output.ptr());

        // Load the constants for the current feature map
        mean     = input_mean[id.z()];
        var      = input_var[id.z()];
        mean_vec = wrapper::vdup_n(mean, ExactTagType{});
        var_vec  = wrapper::vdup_n(var, ExactTagType{});
        if(input_gamma != nullptr)
        {
            gamma     = input_gamma[id.z()];
            gamma_vec = wrapper::vdup_n(gamma, ExactTagType{});
        }
        if(input_beta != nullptr)
        {
            beta     = input_beta[id.z()];
            beta_vec = wrapper::vdup_n(beta, ExactTagType{});
        }

        // Calculate denominator = 1 / sqrt(var + epsilon)
        denominator_vec = wrapper::vinvsqrt(wrapper::vadd(var_vec, epsilon_vec));
        denominator     = wrapper::vgetlane(denominator_vec, 0);

        // Perform core calculations using vector operations
        int x = window_start_x;
        for(; x <= (window_end_x - window_step_x); x += window_step_x)
        {
            // Calculate x bar
            const auto numerator = wrapper::vsub(wrapper::vloadq(input_ptr + x), mean_vec);
            const auto x_bar     = wrapper::vmul(numerator, denominator_vec);
            auto       res       = wrapper::vmla(beta_vec, x_bar, gamma_vec);

            // Perform fused activation
            if(fused_activation)
            {
                activation_functor(res);
            }

            // Store results
            wrapper::vstore(output_ptr + x, res);
        }

        // Compute left-over elements
        for(; x < window_end_x; ++x)
        {
            const T numerator = input_ptr[x] - mean;
            const T x_bar     = numerator * denominator;
            T       res       = beta + x_bar * gamma;

            // Perform fused activation
            if(fused_activation)
            {
                activation_functor(res);
            }

            // Store results
            *(output_ptr + x) = res;
        }
    },
    input, output);
}
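
// For reference, the arithmetic implemented by both the vector loop and the
// leftover loop above is the standard batch normalization transform, applied
// per feature map z:
//
//   denominator = 1 / sqrt(var[z] + epsilon)
//   x_bar       = (x - mean[z]) * denominator
//   out         = beta[z] + gamma[z] * x_bar
//
// with gamma defaulting to 1 and beta to 0 when those tensors are not
// provided, i.e. out = gamma * (x - mean) / sqrt(var + epsilon) + beta.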
void NEBatchNormalizationLayerKernel::configure_non_fused()
{
    switch(_input->info()->data_type())
    {
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
        case DataType::F16:
            _func = &NEBatchNormalizationLayerKernel::batch_normalization_nchw<float16_t, false, detail::dummy<float16_t, 8>>;
            break;
#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
        case DataType::F32:
            _func = &NEBatchNormalizationLayerKernel::batch_normalization_nchw<float, false, detail::dummy<float, 4>>;
            break;
        default:
            ARM_COMPUTE_ERROR("Element size not supported");
    }
}
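
// Note: detail::dummy<T, N> is a no-op activation functor, so the non-fused
// path reuses the same templated kernel body with fused_activation = false;
// the dead activation branch is removed at template instantiation time.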
void NEBatchNormalizationLayerKernel::configure_fused()
{
    // NCHW fused batch normalization with activation functions: FP32
    static std::map<ActivationLayerInfo::ActivationFunction, BatchNormFunctionPtr> bn_fused_map_f32_nchw =
    {
        { ActivationLayerInfo::ActivationFunction::RELU, &NEBatchNormalizationLayerKernel::batch_normalization_nchw<float, true, detail::relu<float, 4>> },
        { ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, &NEBatchNormalizationLayerKernel::batch_normalization_nchw<float, true, detail::brelu<float, 4>> },
        { ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, &NEBatchNormalizationLayerKernel::batch_normalization_nchw<float, true, detail::lubrelu<float, 4>> }
    };
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
    // NCHW fused batch normalization with activation functions: FP16
    static std::map<ActivationLayerInfo::ActivationFunction, BatchNormFunctionPtr> bn_fused_map_f16_nchw =
    {
        { ActivationLayerInfo::ActivationFunction::RELU, &NEBatchNormalizationLayerKernel::batch_normalization_nchw<float16_t, true, detail::relu<float16_t, 8>> },
        { ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, &NEBatchNormalizationLayerKernel::batch_normalization_nchw<float16_t, true, detail::brelu<float16_t, 8>> },
        { ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, &NEBatchNormalizationLayerKernel::batch_normalization_nchw<float16_t, true, detail::lubrelu<float16_t, 8>> }
    };
#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC

    switch(_input->info()->data_type())
    {
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
        case DataType::F16:
            _func = bn_fused_map_f16_nchw[_act_info.activation()];
            break;
#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
        case DataType::F32:
            _func = bn_fused_map_f32_nchw[_act_info.activation()];
            break;
        default:
            ARM_COMPUTE_ERROR("Element size not supported");
    }
}
NEBatchNormalizationLayerKernel::NEBatchNormalizationLayerKernel()
    : _func(nullptr), _input(nullptr), _output(nullptr), _mean(nullptr), _var(nullptr), _gamma(nullptr), _beta(nullptr), _epsilon(), _act_info()
{
}

void NEBatchNormalizationLayerKernel::configure(ITensor *input, ITensor *output, const ITensor *mean, const ITensor *var,
                                                const ITensor *beta, const ITensor *gamma, float epsilon, ActivationLayerInfo act_info)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, mean, var);
    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), (output != nullptr) ? output->info() : nullptr, mean->info(), var->info(),
                                                  (beta != nullptr) ? beta->info() : nullptr, (gamma != nullptr) ? gamma->info() : nullptr,
                                                  epsilon, act_info));

    _input    = input;
    _mean     = mean;
    _var      = var;
    _gamma    = gamma;
    _beta     = beta;
    _epsilon  = epsilon;
    _act_info = act_info;

    // Run in place when no distinct output tensor is provided
    const bool run_in_place = (output == nullptr) || (output == input);
    _output                 = run_in_place ? input : output;

    // NCHW selects a templated member function; NHWC dispatches through available_kernels in run()
    if(input->info()->data_layout() == DataLayout::NCHW)
    {
        _act_info.enabled() ? configure_fused() : configure_non_fused();
    }

    // Configure kernel window
    Window win = calculate_max_window(*input->info(), Steps());
    INEKernel::configure(win);

    if(output != nullptr)
    {
        // Output auto initialization if not yet initialized
        auto_init_if_empty(*output->info(), *input->info()->clone());
    }
}

void NEBatchNormalizationLayerKernel::run(const Window &window, const ThreadInfo &info)
{
    ARM_COMPUTE_UNUSED(info);
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);

    if(_input->info()->data_layout() == DataLayout::NCHW)
    {
        (this->*_func)(window);
    }
    else
    {
        const auto *uk = get_implementation(BatchNormalizationSelectorData{ _input->info()->data_type(), CPUInfo::get() });
        uk->ukernel(_input, _output, _mean, _var, _beta, _gamma, _epsilon, _act_info, window);
    }
}
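
// Minimal usage sketch (illustrative; tensor allocation and initialization are
// elided, and running via run() directly rather than a scheduler is an
// assumption made for brevity):
//
//   NEBatchNormalizationLayerKernel kernel;
//   kernel.configure(&src, &dst, &mean, &var, &beta, &gamma, 0.001f,
//                    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
//   kernel.run(kernel.window(), ThreadInfo{});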