Compute Library 21.02
NEBatchNormalizationLayer.h
/*
 * Copyright (c) 2017-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_NEBATCHNORMALIZATIONLAYER_H
#define ARM_COMPUTE_NEBATCHNORMALIZATIONLAYER_H

#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/IFunction.h"

#include <memory>

namespace arm_compute
{
class ITensor;
class NEBatchNormalizationLayerKernel;

/** Basic function to run @ref NEBatchNormalizationLayerKernel and simulate a batch normalization layer.
 *
 * Batch normalization is calculated by:
 * @f[ out_i = \gamma * (\frac{in_i - \mu_{B}}{\sqrt{\sigma^2_{B} + \epsilon}}) + \beta \equiv BN_{\gamma,\beta}(in_i) @f]
 *
 */
class NEBatchNormalizationLayer : public IFunction
{
public:
    /** Constructor */
    NEBatchNormalizationLayer();
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    NEBatchNormalizationLayer(const NEBatchNormalizationLayer &) = delete;
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    NEBatchNormalizationLayer &operator=(const NEBatchNormalizationLayer &) = delete;
    /** Prevent instances of this class from being moved (As this class contains non movable objects) */
    NEBatchNormalizationLayer(NEBatchNormalizationLayer &&) = delete;
    /** Prevent instances of this class from being moved (As this class contains non movable objects) */
    NEBatchNormalizationLayer &operator=(NEBatchNormalizationLayer &&) = delete;
    /** Default destructor */
    ~NEBatchNormalizationLayer();
    /** Set the input and output tensors.
     *
     * @note If the output tensor is a nullptr or is equal to the input, the batch normalization function will be performed in-place
     *
     * @param[in, out] input    Source tensor. In case of @p output tensor = nullptr, this tensor will store the result.
     *                          3 lower dimensions represent a single input with dimensions [width, height, FM].
     *                          The rest are optional and used for representing batches. Data types supported: F16/F32.
     * @param[out]     output   Destination tensor. Output will have the same number of dimensions as input. Data type supported: same as @p input
     * @param[in]      mean     Mean values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
     * @param[in]      var      Variance values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
     * @param[in]      beta     (Optional) Beta values tensor. 1 dimension with size equal to the feature maps [FM]. If not provided, default value for beta is 0. Data types supported: Same as @p input
     * @param[in]      gamma    (Optional) Gamma values tensor. 1 dimension with size equal to the feature maps [FM]. If not provided, default value for gamma is 1. Data types supported: Same as @p input
     * @param[in]      epsilon  (Optional) Small value to avoid division with zero. Default value is 0.001f.
     * @param[in]      act_info (Optional) Activation layer information in case of a fused activation. Only RELU, BOUNDED_RELU and LU_BOUNDED_RELU supported.
     */
    void configure(ITensor *input, ITensor *output, const ITensor *mean, const ITensor *var, const ITensor *beta = nullptr, const ITensor *gamma = nullptr, float epsilon = 0.001f,
                   ActivationLayerInfo act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref NEBatchNormalizationLayer
     *
     * @param[in] input    Source tensor info. In case of @p output tensor = nullptr, this tensor will store the result.
     *                     3 lower dimensions represent a single input with dimensions [width, height, FM].
     *                     The rest are optional and used for representing batches. Data types supported: F16/F32.
     * @param[in] output   Destination tensor info. Output will have the same number of dimensions as input. Data type supported: same as @p input
     * @param[in] mean     Mean values tensor info. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
     * @param[in] var      Variance values tensor info. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
     * @param[in] beta     (Optional) Beta values tensor info. 1 dimension with size equal to the feature maps [FM]. If not provided, default value for beta is 0. Data types supported: Same as @p input
     * @param[in] gamma    (Optional) Gamma values tensor info. 1 dimension with size equal to the feature maps [FM]. If not provided, default value for gamma is 1. Data types supported: Same as @p input
     * @param[in] epsilon  (Optional) Small value to avoid division with zero. Default value is 0.001f.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation. Only RELU, BOUNDED_RELU and LU_BOUNDED_RELU supported.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input, const ITensorInfo *output,
                           const ITensorInfo *mean, const ITensorInfo *var,
                           const ITensorInfo *beta = nullptr, const ITensorInfo *gamma = nullptr,
                           float epsilon = 0.001f, ActivationLayerInfo act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run() override;

private:
    std::unique_ptr<NEBatchNormalizationLayerKernel> _norm_kernel; /**< Batch normalization layer kernel */
};
} // namespace arm_compute
#endif /* ARM_COMPUTE_NEBATCHNORMALIZATIONLAYER_H */
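As a usage illustration (not part of the header), the sketch below validates, configures, and runs the function on F32 tensors. The tensor shapes, the fused RELU activation, and the surrounding main() are assumptions made for the example; a real application would fill the tensors with data after allocation and before run().

#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEBatchNormalizationLayer.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

int main()
{
    // Illustrative shapes: a single 8x8 input with 16 feature maps (FM).
    const TensorShape input_shape(8U, 8U, 16U);
    const TensorShape params_shape(16U); // mean/var/beta/gamma are 1D of size FM

    Tensor input, output, mean, var, beta, gamma;
    input.allocator()->init(TensorInfo(input_shape, 1, DataType::F32));
    output.allocator()->init(TensorInfo(input_shape, 1, DataType::F32));
    mean.allocator()->init(TensorInfo(params_shape, 1, DataType::F32));
    var.allocator()->init(TensorInfo(params_shape, 1, DataType::F32));
    beta.allocator()->init(TensorInfo(params_shape, 1, DataType::F32));
    gamma.allocator()->init(TensorInfo(params_shape, 1, DataType::F32));

    // Check the configuration up front with the static validate().
    const Status status = NEBatchNormalizationLayer::validate(input.info(), output.info(),
                                                              mean.info(), var.info(),
                                                              beta.info(), gamma.info());
    if(status.error_code() != ErrorCode::OK)
    {
        return 1;
    }

    // Configure with the default epsilon and a fused RELU activation.
    NEBatchNormalizationLayer bn;
    bn.configure(&input, &output, &mean, &var, &beta, &gamma, 0.001f,
                 ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));

    // Allocate backing memory; real data would be written to the tensors here.
    input.allocator()->allocate();
    output.allocator()->allocate();
    mean.allocator()->allocate();
    var.allocator()->allocate();
    beta.allocator()->allocate();
    gamma.allocator()->allocate();

    bn.run();
    return 0;
}

Passing nullptr (or the input tensor itself) as output would instead perform the normalization in-place, as noted in the configure() documentation.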